Current Directory: /opt/imh-python/lib/python3.9/site-packages/celery/backends
Viewing File: /opt/imh-python/lib/python3.9/site-packages/celery/backends/redis.py
"""Redis result store backend.""" import time from contextlib import contextmanager from functools import partial from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED from urllib.parse import unquote from kombu.utils.functional import retry_over_time from kombu.utils.objects import cached_property from kombu.utils.url import _parse_url, maybe_sanitize_url from celery import states from celery._state import task_join_will_block from celery.canvas import maybe_signature from celery.exceptions import BackendStoreError, ChordError, ImproperlyConfigured from celery.result import GroupResult, allow_join_result from celery.utils.functional import _regen, dictfilter from celery.utils.log import get_logger from celery.utils.time import humanize_seconds from .asynchronous import AsyncBackendMixin, BaseResultConsumer from .base import BaseKeyValueStoreBackend try: import redis.connection from kombu.transport.redis import get_redis_error_classes except ImportError: redis = None get_redis_error_classes = None try: import redis.sentinel except ImportError: pass __all__ = ('RedisBackend', 'SentinelBackend') E_REDIS_MISSING = """ You need to install the redis library in order to use \ the Redis result store backend. """ E_REDIS_SENTINEL_MISSING = """ You need to install the redis library with support of \ sentinel in order to use the Redis result store backend. """ W_REDIS_SSL_CERT_OPTIONAL = """ Setting ssl_cert_reqs=CERT_OPTIONAL when connecting to redis means that \ celery might not validate the identity of the redis broker when connecting. \ This leaves you vulnerable to man in the middle attacks. """ W_REDIS_SSL_CERT_NONE = """ Setting ssl_cert_reqs=CERT_NONE when connecting to redis means that celery \ will not validate the identity of the redis broker when connecting. This \ leaves you vulnerable to man in the middle attacks. """ E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH = """ SSL connection parameters have been provided but the specified URL scheme \ is redis://. A Redis SSL connection URL should use the scheme rediss://. """ E_REDIS_SSL_CERT_REQS_MISSING_INVALID = """ A rediss:// URL must have parameter ssl_cert_reqs and this must be set to \ CERT_REQUIRED, CERT_OPTIONAL, or CERT_NONE """ E_LOST = 'Connection to Redis lost: Retry (%s/%s) %s.' E_RETRY_LIMIT_EXCEEDED = """ Retry limit exceeded while trying to reconnect to the Celery redis result \ store backend. The Celery application must be restarted. 
""" logger = get_logger(__name__) class ResultConsumer(BaseResultConsumer): _pubsub = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._get_key_for_task = self.backend.get_key_for_task self._decode_result = self.backend.decode_result self._ensure = self.backend.ensure self._connection_errors = self.backend.connection_errors self.subscribed_to = set() def on_after_fork(self): try: self.backend.client.connection_pool.reset() if self._pubsub is not None: self._pubsub.close() except KeyError as e: logger.warning(str(e)) super().on_after_fork() def _reconnect_pubsub(self): self._pubsub = None self.backend.client.connection_pool.reset() # task state might have changed when the connection was down so we # retrieve meta for all subscribed tasks before going into pubsub mode if self.subscribed_to: metas = self.backend.client.mget(self.subscribed_to) metas = [meta for meta in metas if meta] for meta in metas: self.on_state_change(self._decode_result(meta), None) self._pubsub = self.backend.client.pubsub( ignore_subscribe_messages=True, ) # subscribed_to maybe empty after on_state_change if self.subscribed_to: self._pubsub.subscribe(*self.subscribed_to) else: self._pubsub.connection = self._pubsub.connection_pool.get_connection( 'pubsub', self._pubsub.shard_hint ) # even if there is nothing to subscribe, we should not lose the callback after connecting. # The on_connect callback will re-subscribe to any channels we previously subscribed to. self._pubsub.connection.register_connect_callback(self._pubsub.on_connect) @contextmanager def reconnect_on_error(self): try: yield except self._connection_errors: try: self._ensure(self._reconnect_pubsub, ()) except self._connection_errors: logger.critical(E_RETRY_LIMIT_EXCEEDED) raise def _maybe_cancel_ready_task(self, meta): if meta['status'] in states.READY_STATES: self.cancel_for(meta['task_id']) def on_state_change(self, meta, message): super().on_state_change(meta, message) self._maybe_cancel_ready_task(meta) def start(self, initial_task_id, **kwargs): self._pubsub = self.backend.client.pubsub( ignore_subscribe_messages=True, ) self._consume_from(initial_task_id) def on_wait_for_pending(self, result, **kwargs): for meta in result._iter_meta(**kwargs): if meta is not None: self.on_state_change(meta, None) def stop(self): if self._pubsub is not None: self._pubsub.close() def drain_events(self, timeout=None): if self._pubsub: with self.reconnect_on_error(): message = self._pubsub.get_message(timeout=timeout) if message and message['type'] == 'message': self.on_state_change(self._decode_result(message['data']), message) elif timeout: time.sleep(timeout) def consume_from(self, task_id): if self._pubsub is None: return self.start(task_id) self._consume_from(task_id) def _consume_from(self, task_id): key = self._get_key_for_task(task_id) if key not in self.subscribed_to: self.subscribed_to.add(key) with self.reconnect_on_error(): self._pubsub.subscribe(key) def cancel_for(self, task_id): key = self._get_key_for_task(task_id) self.subscribed_to.discard(key) if self._pubsub: with self.reconnect_on_error(): self._pubsub.unsubscribe(key) class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin): """Redis task result store. It makes use of the following commands: GET, MGET, DEL, INCRBY, EXPIRE, SET, SETEX """ ResultConsumer = ResultConsumer #: :pypi:`redis` client module. redis = redis connection_class_ssl = redis.SSLConnection if redis else None #: Maximum number of connections in the pool. 


class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
    """Redis task result store.

    It makes use of the following commands:
    GET, MGET, DEL, INCRBY, EXPIRE, SET, SETEX
    """

    ResultConsumer = ResultConsumer

    #: :pypi:`redis` client module.
    redis = redis

    connection_class_ssl = redis.SSLConnection if redis else None

    #: Maximum number of connections in the pool.
    max_connections = None

    supports_autoexpire = True
    supports_native_join = True

    #: Maximal length of string value in Redis.
    #: 512 MB - https://redis.io/topics/data-types
    _MAX_STR_VALUE_SIZE = 536870912

    def __init__(self, host=None, port=None, db=None, password=None,
                 max_connections=None, url=None,
                 connection_pool=None, **kwargs):
        super().__init__(expires_type=int, **kwargs)
        _get = self.app.conf.get
        if self.redis is None:
            raise ImproperlyConfigured(E_REDIS_MISSING.strip())

        if host and '://' in host:
            url, host = host, None

        self.max_connections = (
            max_connections or
            _get('redis_max_connections') or
            self.max_connections)
        self._ConnectionPool = connection_pool

        socket_timeout = _get('redis_socket_timeout')
        socket_connect_timeout = _get('redis_socket_connect_timeout')
        retry_on_timeout = _get('redis_retry_on_timeout')
        socket_keepalive = _get('redis_socket_keepalive')
        health_check_interval = _get('redis_backend_health_check_interval')

        self.connparams = {
            'host': _get('redis_host') or 'localhost',
            'port': _get('redis_port') or 6379,
            'db': _get('redis_db') or 0,
            'password': _get('redis_password'),
            'max_connections': self.max_connections,
            'socket_timeout': socket_timeout and float(socket_timeout),
            'retry_on_timeout': retry_on_timeout or False,
            'socket_connect_timeout':
                socket_connect_timeout and float(socket_connect_timeout),
        }

        username = _get('redis_username')
        if username:
            # We're extra careful to avoid including this configuration value
            # if it wasn't specified since older versions of py-redis
            # don't support specifying a username.
            # Only Redis>6.0 supports username/password authentication.

            # TODO: Include this in connparams' definition once we drop
            #       support for py-redis<3.4.0.
            self.connparams['username'] = username

        if health_check_interval:
            self.connparams["health_check_interval"] = health_check_interval

        # absent in redis.connection.UnixDomainSocketConnection
        if socket_keepalive:
            self.connparams['socket_keepalive'] = socket_keepalive

        # "redis_backend_use_ssl" must be a dict with the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        # (the same as "broker_use_ssl")
        ssl = _get('redis_backend_use_ssl')
        if ssl:
            self.connparams.update(ssl)
            self.connparams['connection_class'] = self.connection_class_ssl

        if url:
            self.connparams = self._params_from_url(url, self.connparams)

        # If we've received SSL parameters via query string or the
        # redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
        # via query string ssl_cert_reqs will be a string so convert it here
        if ('connection_class' in self.connparams and
                issubclass(self.connparams['connection_class'], redis.SSLConnection)):
            ssl_cert_reqs_missing = 'MISSING'
            ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED,
                                      'CERT_OPTIONAL': CERT_OPTIONAL,
                                      'CERT_NONE': CERT_NONE,
                                      'required': CERT_REQUIRED,
                                      'optional': CERT_OPTIONAL,
                                      'none': CERT_NONE}
            ssl_cert_reqs = self.connparams.get('ssl_cert_reqs', ssl_cert_reqs_missing)
            ssl_cert_reqs = ssl_string_to_constant.get(ssl_cert_reqs, ssl_cert_reqs)
            if ssl_cert_reqs not in ssl_string_to_constant.values():
                raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING_INVALID)

            if ssl_cert_reqs == CERT_OPTIONAL:
                logger.warning(W_REDIS_SSL_CERT_OPTIONAL)
            elif ssl_cert_reqs == CERT_NONE:
                logger.warning(W_REDIS_SSL_CERT_NONE)
            self.connparams['ssl_cert_reqs'] = ssl_cert_reqs

        self.url = url

        self.connection_errors, self.channel_errors = (
            get_redis_error_classes() if get_redis_error_classes
            else ((), ()))
        self.result_consumer = self.ResultConsumer(
            self, self.app, self.accept,
            self._pending_results, self._pending_messages,
        )

    def _params_from_url(self, url, defaults):
        scheme, host, port, username, password, path, query = _parse_url(url)
        connparams = dict(
            defaults, **dictfilter({
                'host': host, 'port': port, 'username': username,
                'password': password, 'db': query.pop('virtual_host', None)})
        )

        if scheme == 'socket':
            # use 'path' as path to the socket… in this case
            # the database number should be given in 'query'
            connparams.update({
                'connection_class': self.redis.UnixDomainSocketConnection,
                'path': '/' + path,
            })
            # host+port are invalid options when using this connection type.
            connparams.pop('host', None)
            connparams.pop('port', None)
            connparams.pop('socket_connect_timeout')
        else:
            connparams['db'] = path

        ssl_param_keys = ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile',
                          'ssl_cert_reqs']

        if scheme == 'redis':
            # If connparams or query string contain ssl params, raise error
            if (any(key in connparams for key in ssl_param_keys) or
                    any(key in query for key in ssl_param_keys)):
                raise ValueError(E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH)

        if scheme == 'rediss':
            connparams['connection_class'] = redis.SSLConnection
            # The following parameters, if present in the URL, are encoded. We
            # must add the decoded values to connparams.
            for ssl_setting in ssl_param_keys:
                ssl_val = query.pop(ssl_setting, None)
                if ssl_val:
                    connparams[ssl_setting] = unquote(ssl_val)
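
        # Examples of URLs this method handles (hypothetical values):
        #   redis://:secret@localhost:6379/1             TCP; db from the path
        #   socket:///var/run/redis.sock?virtual_host=1  unix socket; db from
        #       the 'virtual_host' query parameter popped above
        # A db component of '/1' or '1' is normalised to the integer 1 below.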
        # db may be string and start with / like in kombu.
        db = connparams.get('db') or 0
        db = db.strip('/') if isinstance(db, str) else db
        connparams['db'] = int(db)

        for key, value in query.items():
            if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
                query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
                    value
                )

        # Query parameters override other parameters
        connparams.update(query)
        return connparams

    @cached_property
    def retry_policy(self):
        retry_policy = super().retry_policy
        if "retry_policy" in self._transport_options:
            retry_policy = retry_policy.copy()
            retry_policy.update(self._transport_options['retry_policy'])

        return retry_policy

    def on_task_call(self, producer, task_id):
        if not task_join_will_block():
            self.result_consumer.consume_from(task_id)

    def get(self, key):
        return self.client.get(key)

    def mget(self, keys):
        return self.client.mget(keys)

    def ensure(self, fun, args, **policy):
        retry_policy = dict(self.retry_policy, **policy)
        max_retries = retry_policy.get('max_retries')
        return retry_over_time(
            fun, self.connection_errors, args, {},
            partial(self.on_connection_error, max_retries),
            **retry_policy)

    def on_connection_error(self, max_retries, exc, intervals, retries):
        tts = next(intervals)
        logger.error(
            E_LOST.strip(),
            retries, max_retries or 'Inf',
            humanize_seconds(tts, 'in '))
        return tts

    def set(self, key, value, **retry_policy):
        if isinstance(value, str) and len(value) > self._MAX_STR_VALUE_SIZE:
            raise BackendStoreError('value too large for Redis backend')
        return self.ensure(self._set, (key, value), **retry_policy)

    def _set(self, key, value):
        with self.client.pipeline() as pipe:
            if self.expires:
                pipe.setex(key, self.expires, value)
            else:
                pipe.set(key, value)
            pipe.publish(key, value)
            pipe.execute()

    def forget(self, task_id):
        super().forget(task_id)
        self.result_consumer.cancel_for(task_id)

    def delete(self, key):
        self.client.delete(key)

    def incr(self, key):
        return self.client.incr(key)

    def expire(self, key, value):
        return self.client.expire(key, value)

    def add_to_chord(self, group_id, result):
        self.client.incr(self.get_key_for_group(group_id, '.t'), 1)

    def _unpack_chord_result(self, tup, decode,
                             EXCEPTION_STATES=states.EXCEPTION_STATES,
                             PROPAGATE_STATES=states.PROPAGATE_STATES):
        _, tid, state, retval = decode(tup)
        if state in EXCEPTION_STATES:
            retval = self.exception_to_python(retval)
        if state in PROPAGATE_STATES:
            raise ChordError(f'Dependency {tid} raised {retval!r}')
        return retval

    def set_chord_size(self, group_id, chord_size):
        self.set(self.get_key_for_group(group_id, '.s'), chord_size)
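
    # Chord bookkeeping uses three per-group keys (suffixes passed to
    # get_key_for_group): '.j' stores the collected results, '.t' counts
    # late additions made via add_to_chord(), and '.s' holds the declared
    # chord size. on_chord_part_return() below fires the chord callback
    # once the '.j' count reaches the '.s' size plus the '.t' delta.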

    def apply_chord(self, header_result_args, body, **kwargs):
        # If any of the child results of this chord are complex (ie. group
        # results themselves), we need to save `header_result` to ensure that
        # the expected structure is retained when we finish the chord and pass
        # the results onward to the body in `on_chord_part_return()`. We don't
        # do this in all cases to retain an optimisation in the common case
        # where a chord header is comprised of simple result objects.
        if not isinstance(header_result_args[1], _regen):
            header_result = self.app.GroupResult(*header_result_args)
            if any(isinstance(nr, GroupResult) for nr in header_result.results):
                header_result.save(backend=self)

    @cached_property
    def _chord_zset(self):
        return self._transport_options.get('result_chord_ordered', True)

    @cached_property
    def _transport_options(self):
        return self.app.conf.get('result_backend_transport_options', {})

    def on_chord_part_return(self, request, state, result,
                             propagate=None, **kwargs):
        app = self.app
        tid, gid, group_index = request.id, request.group, request.group_index
        if not gid or not tid:
            return
        if group_index is None:
            group_index = '+inf'

        client = self.client
        jkey = self.get_key_for_group(gid, '.j')
        tkey = self.get_key_for_group(gid, '.t')
        skey = self.get_key_for_group(gid, '.s')
        result = self.encode_result(result, state)
        encoded = self.encode([1, tid, state, result])
        with client.pipeline() as pipe:
            pipeline = (
                pipe.zadd(jkey, {encoded: group_index}).zcount(jkey, "-inf", "+inf")
                if self._chord_zset
                else pipe.rpush(jkey, encoded).llen(jkey)
            ).get(tkey).get(skey)
            if self.expires:
                pipeline = pipeline \
                    .expire(jkey, self.expires) \
                    .expire(tkey, self.expires) \
                    .expire(skey, self.expires)
            _, readycount, totaldiff, chord_size_bytes = pipeline.execute()[:4]

        totaldiff = int(totaldiff or 0)

        if chord_size_bytes:
            try:
                callback = maybe_signature(request.chord, app=app)
                total = int(chord_size_bytes) + totaldiff
                if readycount == total:
                    header_result = GroupResult.restore(gid)
                    if header_result is not None:
                        # If we manage to restore a `GroupResult`, then it must
                        # have been complex and saved by `apply_chord()` earlier.
                        #
                        # Before we can join the `GroupResult`, it needs to be
                        # manually marked as ready to avoid blocking
                        header_result.on_ready()
                        # We'll `join()` it to get the results and ensure they are
                        # structured as intended rather than the flattened version
                        # we'd construct without any other information.
                        join_func = (
                            header_result.join_native
                            if header_result.supports_native_join
                            else header_result.join
                        )
                        with allow_join_result():
                            resl = join_func(
                                timeout=app.conf.result_chord_join_timeout,
                                propagate=True
                            )
                    else:
                        # Otherwise simply extract and decode the results we
                        # stashed along the way, which should be faster for large
                        # numbers of simple results in the chord header.
                        decode, unpack = self.decode, self._unpack_chord_result
                        with client.pipeline() as pipe:
                            if self._chord_zset:
                                pipeline = pipe.zrange(jkey, 0, -1)
                            else:
                                pipeline = pipe.lrange(jkey, 0, total)
                            resl, = pipeline.execute()
                        resl = [unpack(tup, decode) for tup in resl]

                    try:
                        callback.delay(resl)
                    except Exception as exc:  # pylint: disable=broad-except
                        logger.exception(
                            'Chord callback for %r raised: %r', request.group, exc)
                        return self.chord_error_from_stack(
                            callback,
                            ChordError(f'Callback error: {exc!r}'),
                        )
                    finally:
                        with client.pipeline() as pipe:
                            pipe \
                                .delete(jkey) \
                                .delete(tkey) \
                                .delete(skey) \
                                .execute()
            except ChordError as exc:
                logger.exception('Chord %r raised: %r', request.group, exc)
                return self.chord_error_from_stack(callback, exc)
            except Exception as exc:  # pylint: disable=broad-except
                logger.exception('Chord %r raised: %r', request.group, exc)
                return self.chord_error_from_stack(
                    callback,
                    ChordError(f'Join error: {exc!r}'),
                )

    def _create_client(self, **params):
        return self._get_client()(
            connection_pool=self._get_pool(**params),
        )

    def _get_client(self):
        return self.redis.StrictRedis

    def _get_pool(self, **params):
        return self.ConnectionPool(**params)

    @property
    def ConnectionPool(self):
        if self._ConnectionPool is None:
            self._ConnectionPool = self.redis.ConnectionPool
        return self._ConnectionPool

    @cached_property
    def client(self):
        return self._create_client(**self.connparams)

    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
        return super().__reduce__(
            args, dict(kwargs, expires=self.expires, url=self.url))


if getattr(redis, "sentinel", None):
    class SentinelManagedSSLConnection(
            redis.sentinel.SentinelManagedConnection,
            redis.SSLConnection):
        """Connect to a Redis server using Sentinel + TLS.

        Use Sentinel to identify which Redis server is the current master
        to connect to and when connecting to the Master server, use an
        SSL Connection.
        """
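
# Pointing an application at a Sentinel-managed result store uses the
# ';'-separated URL form consumed by SentinelBackend below, plus the
# 'master_name' transport option (hypothetical addresses and master name):
#
#     app.conf.result_backend = (
#         'sentinel://127.0.0.1:26379/0;sentinel://127.0.0.1:26380/0')
#     app.conf.result_backend_transport_options = {'master_name': 'mymaster'}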
""" class SentinelBackend(RedisBackend): """Redis sentinel task result store.""" # URL looks like `sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3` _SERVER_URI_SEPARATOR = ";" sentinel = getattr(redis, "sentinel", None) connection_class_ssl = SentinelManagedSSLConnection if sentinel else None def __init__(self, *args, **kwargs): if self.sentinel is None: raise ImproperlyConfigured(E_REDIS_SENTINEL_MISSING.strip()) super().__init__(*args, **kwargs) def as_uri(self, include_password=False): """Return the server addresses as URIs, sanitizing the password or not.""" # Allow superclass to do work if we don't need to force sanitization if include_password: return super().as_uri( include_password=include_password, ) # Otherwise we need to ensure that all components get sanitized rather # by passing them one by one to the `kombu` helper uri_chunks = ( maybe_sanitize_url(chunk) for chunk in (self.url or "").split(self._SERVER_URI_SEPARATOR) ) # Similar to the superclass, strip the trailing slash from URIs with # all components empty other than the scheme return self._SERVER_URI_SEPARATOR.join( uri[:-1] if uri.endswith(":///") else uri for uri in uri_chunks ) def _params_from_url(self, url, defaults): chunks = url.split(self._SERVER_URI_SEPARATOR) connparams = dict(defaults, hosts=[]) for chunk in chunks: data = super()._params_from_url( url=chunk, defaults=defaults) connparams['hosts'].append(data) for param in ("host", "port", "db", "password"): connparams.pop(param) # Adding db/password in connparams to connect to the correct instance for param in ("db", "password"): if connparams['hosts'] and param in connparams['hosts'][0]: connparams[param] = connparams['hosts'][0].get(param) return connparams def _get_sentinel_instance(self, **params): connparams = params.copy() hosts = connparams.pop("hosts") min_other_sentinels = self._transport_options.get("min_other_sentinels", 0) sentinel_kwargs = self._transport_options.get("sentinel_kwargs", {}) sentinel_instance = self.sentinel.Sentinel( [(cp['host'], cp['port']) for cp in hosts], min_other_sentinels=min_other_sentinels, sentinel_kwargs=sentinel_kwargs, **connparams) return sentinel_instance def _get_pool(self, **params): sentinel_instance = self._get_sentinel_instance(**params) master_name = self._transport_options.get("master_name", None) return sentinel_instance.master_for( service_name=master_name, redis_class=self._get_client(), ).connection_pool