""" pygments.lexers.robotframework ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Robot Framework. :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Copyright 2012 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from pygments.lexer import Lexer from pygments.token import Token __all__ = ['RobotFrameworkLexer'] HEADING = Token.Generic.Heading SETTING = Token.Keyword.Namespace IMPORT = Token.Name.Namespace TC_KW_NAME = Token.Generic.Subheading KEYWORD = Token.Name.Function ARGUMENT = Token.String VARIABLE = Token.Name.Variable COMMENT = Token.Comment SEPARATOR = Token.Punctuation SYNTAX = Token.Punctuation GHERKIN = Token.Generic.Emph ERROR = Token.Error def normalize(string, remove=''): string = string.lower() for char in remove + ' ': if char in string: string = string.replace(char, '') return string class RobotFrameworkLexer(Lexer): """ For Robot Framework test data. Supports both space and pipe separated plain text formats. """ name = 'RobotFramework' url = 'http://robotframework.org' aliases = ['robotframework'] filenames = ['*.robot', '*.resource'] mimetypes = ['text/x-robotframework'] version_added = '1.6' def __init__(self, **options): options['tabsize'] = 2 options['encoding'] = 'UTF-8' Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): row_tokenizer = RowTokenizer() var_tokenizer = VariableTokenizer() index = 0 for row in text.splitlines(): for value, token in row_tokenizer.tokenize(row): for value, token in var_tokenizer.tokenize(value, token): if value: yield index, token, str(value) index += len(value) class VariableTokenizer: def tokenize(self, string, token): var = VariableSplitter(string, identifiers='$@%&') if var.start < 0 or token in (COMMENT, ERROR): yield string, token return for value, token in self._tokenize(var, string, token): if value: yield value, token def _tokenize(self, var, string, orig_token): before = string[:var.start] yield before, orig_token yield var.identifier + '{', SYNTAX yield from self.tokenize(var.base, VARIABLE) yield '}', SYNTAX if var.index is not None: yield '[', SYNTAX yield from self.tokenize(var.index, VARIABLE) yield ']', SYNTAX yield from self.tokenize(string[var.end:], orig_token) class RowTokenizer: def __init__(self): self._table = UnknownTable() self._splitter = RowSplitter() testcases = TestCaseTable() settings = SettingTable(testcases.set_default_template) variables = VariableTable() keywords = KeywordTable() self._tables = {'settings': settings, 'setting': settings, 'metadata': settings, 'variables': variables, 'variable': variables, 'testcases': testcases, 'testcase': testcases, 'tasks': testcases, 'task': testcases, 'keywords': keywords, 'keyword': keywords, 'userkeywords': keywords, 'userkeyword': keywords} def tokenize(self, row): commented = False heading = False for index, value in enumerate(self._splitter.split(row)): # First value, and every second after that, is a separator. 
index, separator = divmod(index-1, 2) if value.startswith('#'): commented = True elif index == 0 and value.startswith('*'): self._table = self._start_table(value) heading = True yield from self._tokenize(value, index, commented, separator, heading) self._table.end_row() def _start_table(self, header): name = normalize(header, remove='*') return self._tables.get(name, UnknownTable()) def _tokenize(self, value, index, commented, separator, heading): if commented: yield value, COMMENT elif separator: yield value, SEPARATOR elif heading: yield value, HEADING else: yield from self._table.tokenize(value, index) class RowSplitter: _space_splitter = re.compile('( {2,})') _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))') def split(self, row): splitter = (row.startswith('| ') and self._split_from_pipes or self._split_from_spaces) yield from splitter(row) yield '\n' def _split_from_spaces(self, row): yield '' # Start with (pseudo)separator similarly as with pipes yield from self._space_splitter.split(row) def _split_from_pipes(self, row): _, separator, rest = self._pipe_splitter.split(row, 1) yield separator while self._pipe_splitter.search(rest): cell, separator, rest = self._pipe_splitter.split(rest, 1) yield cell yield separator yield rest class Tokenizer: _tokens = None def __init__(self): self._index = 0 def tokenize(self, value): values_and_tokens = self._tokenize(value, self._index) self._index += 1 if isinstance(values_and_tokens, type(Token)): values_and_tokens = [(value, values_and_tokens)] return values_and_tokens def _tokenize(self, value, index): index = min(index, len(self._tokens) - 1) return self._tokens[index] def _is_assign(self, value): if value.endswith('='): value = value[:-1].strip() var = VariableSplitter(value, identifiers='$@&') return var.start == 0 and var.end == len(value) class Comment(Tokenizer): _tokens = (COMMENT,) class Setting(Tokenizer): _tokens = (SETTING, ARGUMENT) _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown', 'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition', 'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate') _import_settings = ('library', 'resource', 'variables') _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags', 'testtimeout','tasktimeout') _custom_tokenizer = None def __init__(self, template_setter=None): Tokenizer.__init__(self) self._template_setter = template_setter def _tokenize(self, value, index): if index == 1 and self._template_setter: self._template_setter(value) if index == 0: normalized = normalize(value) if normalized in self._keyword_settings: self._custom_tokenizer = KeywordCall(support_assign=False) elif normalized in self._import_settings: self._custom_tokenizer = ImportSetting() elif normalized not in self._other_settings: return ERROR elif self._custom_tokenizer: return self._custom_tokenizer.tokenize(value) return Tokenizer._tokenize(self, value, index) class ImportSetting(Tokenizer): _tokens = (IMPORT, ARGUMENT) class TestCaseSetting(Setting): _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition', 'template') _import_settings = () _other_settings = ('documentation', 'tags', 'timeout') def _tokenize(self, value, index): if index == 0: type = Setting._tokenize(self, value[1:-1], index) return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)] return Setting._tokenize(self, value, index) class KeywordSetting(TestCaseSetting): _keyword_settings = ('teardown',) _other_settings = ('documentation', 'arguments', 'return', 
'timeout', 'tags') class Variable(Tokenizer): _tokens = (SYNTAX, ARGUMENT) def _tokenize(self, value, index): if index == 0 and not self._is_assign(value): return ERROR return Tokenizer._tokenize(self, value, index) class KeywordCall(Tokenizer): _tokens = (KEYWORD, ARGUMENT) def __init__(self, support_assign=True): Tokenizer.__init__(self) self._keyword_found = not support_assign self._assigns = 0 def _tokenize(self, value, index): if not self._keyword_found and self._is_assign(value): self._assigns += 1 return SYNTAX # VariableTokenizer tokenizes this later. if self._keyword_found: return Tokenizer._tokenize(self, value, index - self._assigns) self._keyword_found = True return GherkinTokenizer().tokenize(value, KEYWORD) class GherkinTokenizer: _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE) def tokenize(self, value, token): match = self._gherkin_prefix.match(value) if not match: return [(value, token)] end = match.end() return [(value[:end], GHERKIN), (value[end:], token)] class TemplatedKeywordCall(Tokenizer): _tokens = (ARGUMENT,) class ForLoop(Tokenizer): def __init__(self): Tokenizer.__init__(self) self._in_arguments = False def _tokenize(self, value, index): token = self._in_arguments and ARGUMENT or SYNTAX if value.upper() in ('IN', 'IN RANGE'): self._in_arguments = True return token class _Table: _tokenizer_class = None def __init__(self, prev_tokenizer=None): self._tokenizer = self._tokenizer_class() self._prev_tokenizer = prev_tokenizer self._prev_values_on_row = [] def tokenize(self, value, index): if self._continues(value, index): self._tokenizer = self._prev_tokenizer yield value, SYNTAX else: yield from self._tokenize(value, index) self._prev_values_on_row.append(value) def _continues(self, value, index): return value == '...' 
and all(self._is_empty(t) for t in self._prev_values_on_row) def _is_empty(self, value): return value in ('', '\\') def _tokenize(self, value, index): return self._tokenizer.tokenize(value) def end_row(self): self.__init__(prev_tokenizer=self._tokenizer) class UnknownTable(_Table): _tokenizer_class = Comment def _continues(self, value, index): return False class VariableTable(_Table): _tokenizer_class = Variable class SettingTable(_Table): _tokenizer_class = Setting def __init__(self, template_setter, prev_tokenizer=None): _Table.__init__(self, prev_tokenizer) self._template_setter = template_setter def _tokenize(self, value, index): if index == 0 and normalize(value) == 'testtemplate': self._tokenizer = Setting(self._template_setter) return _Table._tokenize(self, value, index) def end_row(self): self.__init__(self._template_setter, prev_tokenizer=self._tokenizer) class TestCaseTable(_Table): _setting_class = TestCaseSetting _test_template = None _default_template = None @property def _tokenizer_class(self): if self._test_template or (self._default_template and self._test_template is not False): return TemplatedKeywordCall return KeywordCall def _continues(self, value, index): return index > 0 and _Table._continues(self, value, index) def _tokenize(self, value, index): if index == 0: if value: self._test_template = None return GherkinTokenizer().tokenize(value, TC_KW_NAME) if index == 1 and self._is_setting(value): if self._is_template(value): self._test_template = False self._tokenizer = self._setting_class(self.set_test_template) else: self._tokenizer = self._setting_class() if index == 1 and self._is_for_loop(value): self._tokenizer = ForLoop() if index == 1 and self._is_empty(value): return [(value, SYNTAX)] return _Table._tokenize(self, value, index) def _is_setting(self, value): return value.startswith('[') and value.endswith(']') def _is_template(self, value): return normalize(value) == '[template]' def _is_for_loop(self, value): return value.startswith(':') and normalize(value, remove=':') == 'for' def set_test_template(self, template): self._test_template = self._is_template_set(template) def set_default_template(self, template): self._default_template = self._is_template_set(template) def _is_template_set(self, template): return normalize(template) not in ('', '\\', 'none', '${empty}') class KeywordTable(TestCaseTable): _tokenizer_class = KeywordCall _setting_class = KeywordSetting def _is_template(self, value): return False # Following code copied directly from Robot Framework 2.7.5. 
class VariableSplitter: def __init__(self, string, identifiers): self.identifier = None self.base = None self.index = None self.start = -1 self.end = -1 self._identifiers = identifiers self._may_have_internal_variables = False try: self._split(string) except ValueError: pass else: self._finalize() def get_replaced_base(self, variables): if self._may_have_internal_variables: return variables.replace_string(self.base) return self.base def _finalize(self): self.identifier = self._variable_chars[0] self.base = ''.join(self._variable_chars[2:-1]) self.end = self.start + len(self._variable_chars) if self._has_list_or_dict_variable_index(): self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1]) self.end += len(self._list_and_dict_variable_index_chars) def _has_list_or_dict_variable_index(self): return self._list_and_dict_variable_index_chars\ and self._list_and_dict_variable_index_chars[-1] == ']' def _split(self, string): start_index, max_index = self._find_variable(string) self.start = start_index self._open_curly = 1 self._state = self._variable_state self._variable_chars = [string[start_index], '{'] self._list_and_dict_variable_index_chars = [] self._string = string start_index += 2 for index, char in enumerate(string[start_index:]): index += start_index # Giving start to enumerate only in Py 2.6+ try: self._state(char, index) except StopIteration: return if index == max_index and not self._scanning_list_variable_index(): return def _scanning_list_variable_index(self): return self._state in [self._waiting_list_variable_index_state, self._list_variable_index_state] def _find_variable(self, string): max_end_index = string.rfind('}') if max_end_index == -1: raise ValueError('No variable end found') if self._is_escaped(string, max_end_index): return self._find_variable(string[:max_end_index]) start_index = self._find_start_index(string, 1, max_end_index) if start_index == -1: raise ValueError('No variable start found') return start_index, max_end_index def _find_start_index(self, string, start, end): index = string.find('{', start, end) - 1 if index < 0: return -1 if self._start_index_is_ok(string, index): return index return self._find_start_index(string, index+2, end) def _start_index_is_ok(self, string, index): return string[index] in self._identifiers\ and not self._is_escaped(string, index) def _is_escaped(self, string, index): escaped = False while index > 0 and string[index-1] == '\\': index -= 1 escaped = not escaped return escaped def _variable_state(self, char, index): self._variable_chars.append(char) if char == '}' and not self._is_escaped(self._string, index): self._open_curly -= 1 if self._open_curly == 0: if not self._is_list_or_dict_variable(): raise StopIteration self._state = self._waiting_list_variable_index_state elif char in self._identifiers: self._state = self._internal_variable_start_state def _is_list_or_dict_variable(self): return self._variable_chars[0] in ('@','&') def _internal_variable_start_state(self, char, index): self._state = self._variable_state if char == '{': self._variable_chars.append(char) self._open_curly += 1 self._may_have_internal_variables = True else: self._variable_state(char, index) def _waiting_list_variable_index_state(self, char, index): if char != '[': raise StopIteration self._list_and_dict_variable_index_chars.append(char) self._state = self._list_variable_index_state def _list_variable_index_state(self, char, index): self._list_and_dict_variable_index_chars.append(char) if char == ']': raise StopIteration
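

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module): one way to
# drive this lexer through the public Pygments API. The `_sample` Robot
# Framework data below is made up for demonstration; it exercises the table
# heading, import, variable-assignment, and keyword-call token paths.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    _sample = (
        '*** Settings ***\n'
        'Library    Collections\n'
        '\n'
        '*** Test Cases ***\n'
        'Greet User\n'
        '    ${name} =    Set Variable    world\n'
        '    Log    Hello, ${name}!\n'
    )
    # highlight() feeds the text through get_tokens_unprocessed() above and
    # renders the resulting token stream with ANSI colors.
    print(highlight(_sample, RobotFrameworkLexer(), TerminalFormatter()))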