"""Provides the :class:`Arrow <arrow.parser.DateTimeParser>` class, a better way to parse datetime strings.""" import re import sys from datetime import datetime, timedelta from datetime import tzinfo as dt_tzinfo from functools import lru_cache from typing import ( Any, ClassVar, Dict, Iterable, List, Match, Optional, Pattern, SupportsFloat, SupportsInt, Tuple, Union, cast, overload, ) from dateutil import tz from arrow import locales from arrow.constants import DEFAULT_LOCALE from arrow.util import next_weekday, normalize_timestamp if sys.version_info < (3, 8): # pragma: no cover from typing_extensions import Literal, TypedDict else: from typing import Literal, TypedDict # pragma: no cover class ParserError(ValueError): pass # Allows for ParserErrors to be propagated from _build_datetime() # when day_of_year errors occur. # Before this, the ParserErrors were caught by the try/except in # _parse_multiformat() and the appropriate error message was not # transmitted to the user. class ParserMatchError(ParserError): pass _WEEKDATE_ELEMENT = Union[str, bytes, SupportsInt, bytearray] _FORMAT_TYPE = Literal[ "YYYY", "YY", "MM", "M", "DDDD", "DDD", "DD", "D", "HH", "H", "hh", "h", "mm", "m", "ss", "s", "X", "x", "ZZZ", "ZZ", "Z", "S", "W", "MMMM", "MMM", "Do", "dddd", "ddd", "d", "a", "A", ] class _Parts(TypedDict, total=False): year: int month: int day_of_year: int day: int hour: int minute: int second: int microsecond: int timestamp: float expanded_timestamp: int tzinfo: dt_tzinfo am_pm: Literal["am", "pm"] day_of_week: int weekdate: Tuple[_WEEKDATE_ELEMENT, _WEEKDATE_ELEMENT, Optional[_WEEKDATE_ELEMENT]] class DateTimeParser: _FORMAT_RE: ClassVar[Pattern[str]] = re.compile( r"(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|x|X|W)" ) _ESCAPE_RE: ClassVar[Pattern[str]] = re.compile(r"\[[^\[\]]*\]") _ONE_OR_TWO_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{1,2}") _ONE_OR_TWO_OR_THREE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{1,3}") _ONE_OR_MORE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d+") _TWO_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{2}") _THREE_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{3}") _FOUR_DIGIT_RE: ClassVar[Pattern[str]] = re.compile(r"\d{4}") _TZ_Z_RE: ClassVar[Pattern[str]] = re.compile(r"([\+\-])(\d{2})(?:(\d{2}))?|Z") _TZ_ZZ_RE: ClassVar[Pattern[str]] = re.compile(r"([\+\-])(\d{2})(?:\:(\d{2}))?|Z") _TZ_NAME_RE: ClassVar[Pattern[str]] = re.compile(r"\w[\w+\-/]+") # NOTE: timestamps cannot be parsed from natural language strings (by removing the ^...$) because it will # break cases like "15 Jul 2000" and a format list (see issue #447) _TIMESTAMP_RE: ClassVar[Pattern[str]] = re.compile(r"^\-?\d+\.?\d+$") _TIMESTAMP_EXPANDED_RE: ClassVar[Pattern[str]] = re.compile(r"^\-?\d+$") _TIME_RE: ClassVar[Pattern[str]] = re.compile( r"^(\d{2})(?:\:?(\d{2}))?(?:\:?(\d{2}))?(?:([\.\,])(\d+))?$" ) _WEEK_DATE_RE: ClassVar[Pattern[str]] = re.compile( r"(?P<year>\d{4})[\-]?W(?P<week>\d{2})[\-]?(?P<day>\d)?" 
    )

    _BASE_INPUT_RE_MAP: ClassVar[Dict[_FORMAT_TYPE, Pattern[str]]] = {
        "YYYY": _FOUR_DIGIT_RE,
        "YY": _TWO_DIGIT_RE,
        "MM": _TWO_DIGIT_RE,
        "M": _ONE_OR_TWO_DIGIT_RE,
        "DDDD": _THREE_DIGIT_RE,
        "DDD": _ONE_OR_TWO_OR_THREE_DIGIT_RE,
        "DD": _TWO_DIGIT_RE,
        "D": _ONE_OR_TWO_DIGIT_RE,
        "HH": _TWO_DIGIT_RE,
        "H": _ONE_OR_TWO_DIGIT_RE,
        "hh": _TWO_DIGIT_RE,
        "h": _ONE_OR_TWO_DIGIT_RE,
        "mm": _TWO_DIGIT_RE,
        "m": _ONE_OR_TWO_DIGIT_RE,
        "ss": _TWO_DIGIT_RE,
        "s": _ONE_OR_TWO_DIGIT_RE,
        "X": _TIMESTAMP_RE,
        "x": _TIMESTAMP_EXPANDED_RE,
        "ZZZ": _TZ_NAME_RE,
        "ZZ": _TZ_ZZ_RE,
        "Z": _TZ_Z_RE,
        "S": _ONE_OR_MORE_DIGIT_RE,
        "W": _WEEK_DATE_RE,
    }

    SEPARATORS: ClassVar[List[str]] = ["-", "/", "."]

    locale: locales.Locale
    _input_re_map: Dict[_FORMAT_TYPE, Pattern[str]]

    def __init__(self, locale: str = DEFAULT_LOCALE, cache_size: int = 0) -> None:
        self.locale = locales.get_locale(locale)
        self._input_re_map = self._BASE_INPUT_RE_MAP.copy()
        self._input_re_map.update(
            {
                "MMMM": self._generate_choice_re(
                    self.locale.month_names[1:], re.IGNORECASE
                ),
                "MMM": self._generate_choice_re(
                    self.locale.month_abbreviations[1:], re.IGNORECASE
                ),
                "Do": re.compile(self.locale.ordinal_day_re),
                "dddd": self._generate_choice_re(
                    self.locale.day_names[1:], re.IGNORECASE
                ),
                "ddd": self._generate_choice_re(
                    self.locale.day_abbreviations[1:], re.IGNORECASE
                ),
                "d": re.compile(r"[1-7]"),
                "a": self._generate_choice_re(
                    (self.locale.meridians["am"], self.locale.meridians["pm"])
                ),
                # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to
                # ensure backwards compatibility of this token
                "A": self._generate_choice_re(self.locale.meridians.values()),
            }
        )

        if cache_size > 0:
            self._generate_pattern_re = lru_cache(maxsize=cache_size)(  # type: ignore
                self._generate_pattern_re
            )

    # TODO: since we support more than ISO 8601, we should rename this function
    # IDEA: break into multiple functions
    def parse_iso(
        self, datetime_string: str, normalize_whitespace: bool = False
    ) -> datetime:
        if normalize_whitespace:
            datetime_string = re.sub(r"\s+", " ", datetime_string.strip())

        has_space_divider = " " in datetime_string
        has_t_divider = "T" in datetime_string

        num_spaces = datetime_string.count(" ")
        if has_space_divider and num_spaces != 1 or has_t_divider and num_spaces > 0:
            raise ParserError(
                f"Expected an ISO 8601-like string, but was given {datetime_string!r}. "
                "Try passing in a format string to resolve this."
            )

        has_time = has_space_divider or has_t_divider
        has_tz = False

        # date formats (ISO 8601 and others) to test against
        # NOTE: YYYYMM is omitted to avoid confusion with YYMMDD (no longer part of ISO 8601, but is still often used)
        formats = [
            "YYYY-MM-DD",
            "YYYY-M-DD",
            "YYYY-M-D",
            "YYYY/MM/DD",
            "YYYY/M/DD",
            "YYYY/M/D",
            "YYYY.MM.DD",
            "YYYY.M.DD",
            "YYYY.M.D",
            "YYYYMMDD",
            "YYYY-DDDD",
            "YYYYDDDD",
            "YYYY-MM",
            "YYYY/MM",
            "YYYY.MM",
            "YYYY",
            "W",
        ]

        if has_time:
            if has_space_divider:
                date_string, time_string = datetime_string.split(" ", 1)
            else:
                date_string, time_string = datetime_string.split("T", 1)

            time_parts = re.split(
                r"[\+\-Z]", time_string, maxsplit=1, flags=re.IGNORECASE
            )

            time_components: Optional[Match[str]] = self._TIME_RE.match(time_parts[0])

            if time_components is None:
                raise ParserError(
                    "Invalid time component provided. "
                    "Please specify a format or provide a valid time component in the basic or extended ISO 8601 time format."
                )

            (
                hours,
                minutes,
                seconds,
                subseconds_sep,
                subseconds,
            ) = time_components.groups()

            has_tz = len(time_parts) == 2
            has_minutes = minutes is not None
            has_seconds = seconds is not None
            has_subseconds = subseconds is not None

            is_basic_time_format = ":" not in time_parts[0]
            tz_format = "Z"

            # use 'ZZ' token instead since tz offset is present in non-basic format
            if has_tz and ":" in time_parts[1]:
                tz_format = "ZZ"

            time_sep = "" if is_basic_time_format else ":"

            if has_subseconds:
                time_string = "HH{time_sep}mm{time_sep}ss{subseconds_sep}S".format(
                    time_sep=time_sep, subseconds_sep=subseconds_sep
                )
            elif has_seconds:
                time_string = "HH{time_sep}mm{time_sep}ss".format(time_sep=time_sep)
            elif has_minutes:
                time_string = f"HH{time_sep}mm"
            else:
                time_string = "HH"

            if has_space_divider:
                formats = [f"{f} {time_string}" for f in formats]
            else:
                formats = [f"{f}T{time_string}" for f in formats]

        if has_time and has_tz:
            # Add "Z" or "ZZ" to the format strings to indicate to
            # _parse_token() that a timezone needs to be parsed
            formats = [f"{f}{tz_format}" for f in formats]

        return self._parse_multiformat(datetime_string, formats)

    def parse(
        self,
        datetime_string: str,
        fmt: Union[List[str], str],
        normalize_whitespace: bool = False,
    ) -> datetime:
        if normalize_whitespace:
            datetime_string = re.sub(r"\s+", " ", datetime_string)

        if isinstance(fmt, list):
            return self._parse_multiformat(datetime_string, fmt)

        try:
            fmt_tokens: List[_FORMAT_TYPE]
            fmt_pattern_re: Pattern[str]
            fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt)
        except re.error as e:
            raise ParserMatchError(
                f"Failed to generate regular expression pattern: {e}."
            )

        match = fmt_pattern_re.search(datetime_string)

        if match is None:
            raise ParserMatchError(
                f"Failed to match {fmt!r} when parsing {datetime_string!r}."
            )

        parts: _Parts = {}
        for token in fmt_tokens:
            value: Union[Tuple[str, str, str], str]
            if token == "Do":
                value = match.group("value")
            elif token == "W":
                value = (match.group("year"), match.group("week"), match.group("day"))
            else:
                value = match.group(token)

            if value is None:
                raise ParserMatchError(
                    f"Unable to find a match group for the specified token {token!r}."
                )

            self._parse_token(token, value, parts)  # type: ignore[arg-type]

        return self._build_datetime(parts)

    def _generate_pattern_re(
        self, fmt: str
    ) -> Tuple[List[_FORMAT_TYPE], Pattern[str]]:
        # fmt is a string of tokens like 'YYYY-MM-DD'
        # we construct a new string by replacing each
        # token by its pattern:
        # 'YYYY-MM-DD' -> '(?P<YYYY>\d{4})-(?P<MM>\d{2})-(?P<DD>\d{2})'
        tokens: List[_FORMAT_TYPE] = []
        offset = 0

        # Escape all special RegEx chars
        escaped_fmt = re.escape(fmt)

        # Extract the bracketed expressions to be reinserted later.
        escaped_fmt = re.sub(self._ESCAPE_RE, "#", escaped_fmt)

        # Any number of S is the same as one.
        # TODO: allow users to specify the number of digits to parse
        escaped_fmt = re.sub(r"S+", "S", escaped_fmt)

        escaped_data = re.findall(self._ESCAPE_RE, fmt)

        fmt_pattern = escaped_fmt

        for m in self._FORMAT_RE.finditer(escaped_fmt):
            token: _FORMAT_TYPE = cast(_FORMAT_TYPE, m.group(0))
            try:
                input_re = self._input_re_map[token]
            except KeyError:
                raise ParserError(f"Unrecognized token {token!r}.")
            input_pattern = f"(?P<{token}>{input_re.pattern})"
            tokens.append(token)
            # a pattern doesn't have the same length as the token
            # it replaces! We keep the difference in the offset variable.
            # This works because the string is scanned left-to-right and matches
            # are returned in the order found by finditer.
            fmt_pattern = (
                fmt_pattern[: m.start() + offset]
                + input_pattern
                + fmt_pattern[m.end() + offset :]
            )
            offset += len(input_pattern) - (m.end() - m.start())

        final_fmt_pattern = ""
        split_fmt = fmt_pattern.split(r"\#")

        # Due to the way Python splits, 'split_fmt' will always be longer
        for i in range(len(split_fmt)):
            final_fmt_pattern += split_fmt[i]
            if i < len(escaped_data):
                final_fmt_pattern += escaped_data[i][1:-1]

        # Wrap final_fmt_pattern in a custom word boundary to strictly
        # match the formatting pattern and filter out date and time formats
        # that include junk such as: blah1998-09-12 blah, blah 1998-09-12blah,
        # blah1998-09-12blah. The custom word boundary matches every character
        # that is not a whitespace character to allow for searching for a date
        # and time string in a natural language sentence. Therefore, searching
        # for a string of the form YYYY-MM-DD in "blah 1998-09-12 blah" will
        # work properly.
        # Certain punctuation before or after the target pattern such as
        # "1998-09-12," is permitted. For the full list of valid punctuation,
        # see the documentation.

        starting_word_boundary = (
            r"(?<!\S\S)"  # Don't have two consecutive non-whitespace characters. This ensures that we allow cases
            # like .11.25.2019 but not 1.11.25.2019 (for pattern MM.DD.YYYY)
            r"(?<![^\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)<>\s])"  # This is the list of punctuation that is ok before the
            # pattern (i.e. "It can't not be these characters before the pattern")
            r"(\b|^)"
            # The \b is to block cases like 1201912 but allow 201912 for pattern YYYYMM. The ^ was necessary to allow a
            # negative number through i.e. before epoch numbers
        )
        ending_word_boundary = (
            r"(?=[\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)\<\>]?"  # Positive lookahead stating that these punctuation marks
            # can appear after the pattern at most 1 time
            r"(?!\S))"  # Don't allow any non-whitespace character after the punctuation
        )
        bounded_fmt_pattern = r"{}{}{}".format(
            starting_word_boundary, final_fmt_pattern, ending_word_boundary
        )

        return tokens, re.compile(bounded_fmt_pattern, flags=re.IGNORECASE)

    @overload
    def _parse_token(
        self,
        token: Literal[
            "YYYY",
            "YY",
            "MM",
            "M",
            "DDDD",
            "DDD",
            "DD",
            "D",
            "Do",
            "HH",
            "hh",
            "h",
            "H",
            "mm",
            "m",
            "ss",
            "s",
            "x",
        ],
        value: Union[str, bytes, SupportsInt, bytearray],
        parts: _Parts,
    ) -> None:
        ...  # pragma: no cover

    @overload
    def _parse_token(
        self,
        token: Literal["X"],
        value: Union[str, bytes, SupportsFloat, bytearray],
        parts: _Parts,
    ) -> None:
        ...  # pragma: no cover

    @overload
    def _parse_token(
        self,
        token: Literal["MMMM", "MMM", "dddd", "ddd", "S"],
        value: Union[str, bytes, bytearray],
        parts: _Parts,
    ) -> None:
        ...  # pragma: no cover

    @overload
    def _parse_token(
        self,
        token: Literal["a", "A", "ZZZ", "ZZ", "Z"],
        value: Union[str, bytes],
        parts: _Parts,
    ) -> None:
        ...  # pragma: no cover

    @overload
    def _parse_token(
        self,
        token: Literal["W"],
        value: Tuple[_WEEKDATE_ELEMENT, _WEEKDATE_ELEMENT, Optional[_WEEKDATE_ELEMENT]],
        parts: _Parts,
    ) -> None:
        ...  # pragma: no cover

    def _parse_token(
        self,
        token: Any,
        value: Any,
        parts: _Parts,
    ) -> None:
        if token == "YYYY":
            parts["year"] = int(value)

        elif token == "YY":
            value = int(value)
            parts["year"] = 1900 + value if value > 68 else 2000 + value

        elif token in ["MMMM", "MMM"]:
            # FIXME: month_number() is nullable
            parts["month"] = self.locale.month_number(value.lower())  # type: ignore[typeddict-item]

        elif token in ["MM", "M"]:
            parts["month"] = int(value)

        elif token in ["DDDD", "DDD"]:
            parts["day_of_year"] = int(value)

        elif token in ["DD", "D"]:
            parts["day"] = int(value)

        elif token == "Do":
            parts["day"] = int(value)

        elif token == "dddd":
            # locale day names are 1-indexed
            day_of_week = [x.lower() for x in self.locale.day_names].index(
                value.lower()
            )
            parts["day_of_week"] = day_of_week - 1

        elif token == "ddd":
            # locale day abbreviations are 1-indexed
            day_of_week = [x.lower() for x in self.locale.day_abbreviations].index(
                value.lower()
            )
            parts["day_of_week"] = day_of_week - 1

        elif token.upper() in ["HH", "H"]:
            parts["hour"] = int(value)

        elif token in ["mm", "m"]:
            parts["minute"] = int(value)

        elif token in ["ss", "s"]:
            parts["second"] = int(value)

        elif token == "S":
            # We have the *most significant* digits of an arbitrary-precision integer.
            # We want the six most significant digits as an integer, rounded.
            # IDEA: add nanosecond support somehow? Need datetime support for it first.
            value = value.ljust(7, "0")

            # floating-point (IEEE-754) defaults to half-to-even rounding
            seventh_digit = int(value[6])
            if seventh_digit == 5:
                rounding = int(value[5]) % 2
            elif seventh_digit > 5:
                rounding = 1
            else:
                rounding = 0

            parts["microsecond"] = int(value[:6]) + rounding

        elif token == "X":
            parts["timestamp"] = float(value)

        elif token == "x":
            parts["expanded_timestamp"] = int(value)

        elif token in ["ZZZ", "ZZ", "Z"]:
            parts["tzinfo"] = TzinfoParser.parse(value)

        elif token in ["a", "A"]:
            if value in (self.locale.meridians["am"], self.locale.meridians["AM"]):
                parts["am_pm"] = "am"
                if "hour" in parts and not 0 <= parts["hour"] <= 12:
                    raise ParserMatchError(
                        f"Hour token value must be between 0 and 12 inclusive for token {token!r}."
                    )
            elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]):
                parts["am_pm"] = "pm"

        elif token == "W":
            parts["weekdate"] = value

    @staticmethod
    def _build_datetime(parts: _Parts) -> datetime:
        weekdate = parts.get("weekdate")

        if weekdate is not None:
            year, week = int(weekdate[0]), int(weekdate[1])

            if weekdate[2] is not None:
                _day = int(weekdate[2])
            else:
                # day not given, default to 1
                _day = 1

            date_string = f"{year}-{week}-{_day}"

            # tokens for ISO 8601 weekdates
            dt = datetime.strptime(date_string, "%G-%V-%u")

            parts["year"] = dt.year
            parts["month"] = dt.month
            parts["day"] = dt.day

        timestamp = parts.get("timestamp")

        if timestamp is not None:
            return datetime.fromtimestamp(timestamp, tz=tz.tzutc())

        expanded_timestamp = parts.get("expanded_timestamp")

        if expanded_timestamp is not None:
            return datetime.fromtimestamp(
                normalize_timestamp(expanded_timestamp),
                tz=tz.tzutc(),
            )

        day_of_year = parts.get("day_of_year")

        if day_of_year is not None:
            _year = parts.get("year")
            month = parts.get("month")
            if _year is None:
                raise ParserError(
                    "Year component is required with the DDD and DDDD tokens."
                )

            if month is not None:
                raise ParserError(
                    "Month component is not allowed with the DDD and DDDD tokens."
                )

            date_string = f"{_year}-{day_of_year}"

            try:
                dt = datetime.strptime(date_string, "%Y-%j")
            except ValueError:
                raise ParserError(
                    f"The provided day of year {day_of_year!r} is invalid."
                )

            parts["year"] = dt.year
            parts["month"] = dt.month
            parts["day"] = dt.day

        day_of_week: Optional[int] = parts.get("day_of_week")
        day = parts.get("day")

        # If day is passed, ignore day of week
        if day_of_week is not None and day is None:
            year = parts.get("year", 1970)
            month = parts.get("month", 1)
            day = 1

            # dddd => first day of week after epoch
            # dddd YYYY => first day of week in specified year
            # dddd MM YYYY => first day of week in specified year and month
            # dddd MM => first day after epoch in specified month
            next_weekday_dt = next_weekday(datetime(year, month, day), day_of_week)
            parts["year"] = next_weekday_dt.year
            parts["month"] = next_weekday_dt.month
            parts["day"] = next_weekday_dt.day

        am_pm = parts.get("am_pm")
        hour = parts.get("hour", 0)

        if am_pm == "pm" and hour < 12:
            hour += 12
        elif am_pm == "am" and hour == 12:
            hour = 0

        # Support for midnight at the end of day
        if hour == 24:
            if parts.get("minute", 0) != 0:
                raise ParserError("Midnight at the end of day must not contain minutes")
            if parts.get("second", 0) != 0:
                raise ParserError("Midnight at the end of day must not contain seconds")
            if parts.get("microsecond", 0) != 0:
                raise ParserError(
                    "Midnight at the end of day must not contain microseconds"
                )
            hour = 0
            day_increment = 1
        else:
            day_increment = 0

        # account for rounding up to 1000000
        microsecond = parts.get("microsecond", 0)
        if microsecond == 1000000:
            microsecond = 0
            second_increment = 1
        else:
            second_increment = 0

        increment = timedelta(days=day_increment, seconds=second_increment)

        return (
            datetime(
                year=parts.get("year", 1),
                month=parts.get("month", 1),
                day=parts.get("day", 1),
                hour=hour,
                minute=parts.get("minute", 0),
                second=parts.get("second", 0),
                microsecond=microsecond,
                tzinfo=parts.get("tzinfo"),
            )
            + increment
        )

    def _parse_multiformat(self, string: str, formats: Iterable[str]) -> datetime:
        _datetime: Optional[datetime] = None

        for fmt in formats:
            try:
                _datetime = self.parse(string, fmt)
                break
            except ParserMatchError:
                pass

        if _datetime is None:
            supported_formats = ", ".join(formats)
            raise ParserError(
                f"Could not match input {string!r} to any of the following formats: {supported_formats}."
            )

        return _datetime

    # generates a capture group of choices separated by an OR operator
    @staticmethod
    def _generate_choice_re(
        choices: Iterable[str], flags: Union[int, re.RegexFlag] = 0
    ) -> Pattern[str]:
        return re.compile(r"({})".format("|".join(choices)), flags=flags)


class TzinfoParser:
    _TZINFO_RE: ClassVar[Pattern[str]] = re.compile(
        r"^(?:\(UTC)*([\+\-])?(\d{2})(?:\:?(\d{2}))?"
    )

    @classmethod
    def parse(cls, tzinfo_string: str) -> dt_tzinfo:
        tzinfo: Optional[dt_tzinfo] = None

        if tzinfo_string == "local":
            tzinfo = tz.tzlocal()

        elif tzinfo_string in ["utc", "UTC", "Z"]:
            tzinfo = tz.tzutc()

        else:
            iso_match = cls._TZINFO_RE.match(tzinfo_string)

            if iso_match:
                sign: Optional[str]
                hours: str
                minutes: Union[str, int, None]
                sign, hours, minutes = iso_match.groups()
                seconds = int(hours) * 3600 + int(minutes or 0) * 60

                if sign == "-":
                    seconds *= -1

                tzinfo = tz.tzoffset(None, seconds)

            else:
                tzinfo = tz.gettz(tzinfo_string)

        if tzinfo is None:
            raise ParserError(f"Could not parse timezone expression {tzinfo_string!r}.")

        return tzinfo
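

# Minimal usage sketch, illustrative only: the inputs below are made-up
# examples, and arrow's public arrow.get() API is the usual entry point
# rather than instantiating DateTimeParser directly.
if __name__ == "__main__":
    parser = DateTimeParser(locale="en-us", cache_size=128)

    # Token-based parsing: the format string is compiled into a named-group
    # regex by _generate_pattern_re(), then _parse_token()/_build_datetime()
    # assemble the resulting datetime.
    print(parser.parse("2013-05-05 12:30:45", "YYYY-MM-DD HH:mm:ss"))

    # ISO 8601-style parsing tries the candidate date formats listed in
    # parse_iso() and appends the appropriate time and timezone tokens.
    print(parser.parse_iso("2013-02-03T04:05:06.789+01:00"))

    # Timezone expressions (offsets or IANA names) go through TzinfoParser.
    print(TzinfoParser.parse("+02:00"))
    print(TzinfoParser.parse("Europe/Paris"))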