"""Worker Consumer Blueprint. This module contains the components responsible for consuming messages from the broker, processing the messages and keeping the broker connections up and running. """ import errno import logging import os import warnings from collections import defaultdict from time import sleep from billiard.common import restart_state from billiard.exceptions import RestartFreqExceeded from kombu.asynchronous.semaphore import DummyLock from kombu.exceptions import ContentDisallowed, DecodeError from kombu.utils.compat import _detect_environment from kombu.utils.encoding import safe_repr from kombu.utils.limits import TokenBucket from vine import ppartial, promise from celery import bootsteps, signals from celery.app.trace import build_tracer from celery.exceptions import (CPendingDeprecationWarning, InvalidTaskError, NotRegistered, WorkerShutdown, WorkerTerminate) from celery.utils.functional import noop from celery.utils.log import get_logger from celery.utils.nodenames import gethostname from celery.utils.objects import Bunch from celery.utils.text import truncate from celery.utils.time import humanize_seconds, rate from celery.worker import loops from celery.worker.state import active_requests, maybe_shutdown, requests, reserved_requests, task_reserved __all__ = ('Consumer', 'Evloop', 'dump_body') CLOSE = bootsteps.CLOSE TERMINATE = bootsteps.TERMINATE STOP_CONDITIONS = {CLOSE, TERMINATE} logger = get_logger(__name__) debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, logger.error, logger.critical) CONNECTION_RETRY = """\ consumer: Connection to broker lost. \ Trying to re-establish the connection...\ """ CONNECTION_RETRY_STEP = """\ Trying again {when}... ({retries}/{max_retries})\ """ CONNECTION_ERROR = """\ consumer: Cannot connect to %s: %s. %s """ CONNECTION_FAILOVER = """\ Will retry using next failover.\ """ UNKNOWN_FORMAT = """\ Received and deleted unknown message. Wrong destination?!? The full contents of the message body was: %s """ #: Error message for when an unregistered task is received. UNKNOWN_TASK_ERROR = """\ Received unregistered task of type %s. The message has been ignored and discarded. Did you remember to import the module containing this task? Or maybe you're using relative imports? Please see https://docs.celeryq.dev/en/latest/internals/protocol.html for more information. The full contents of the message body was: %s The full contents of the message headers: %s The delivery info for this task is: %s """ #: Error message for when an invalid task message is received. INVALID_TASK_ERROR = """\ Received invalid task message: %s The message has been ignored and discarded. Please ensure your message conforms to the task message protocol as described here: https://docs.celeryq.dev/en/latest/internals/protocol.html The full contents of the message body was: %s """ MESSAGE_DECODE_ERROR = """\ Can't decode message body: %r [type:%r encoding:%r headers:%s] body: %s """ MESSAGE_REPORT = """\ body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3} headers={4}}} """ TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS = """\ Task %s cannot be acknowledged after a connection loss since late acknowledgement is enabled for it. Terminating it instead. """ CANCEL_TASKS_BY_DEFAULT = """ In Celery 5.1 we introduced an optional breaking change which on connection loss cancels all currently executed tasks with late acknowledgement enabled. 
These tasks cannot be acknowledged as the connection is gone, and the tasks are automatically redelivered back to the queue.
You can enable this behavior using the worker_cancel_long_running_tasks_on_connection_loss setting.
In Celery 5.1 it is set to False by default. The setting will be set to True by default in Celery 6.0.
"""


def dump_body(m, body):
    """Format message body for debugging purposes."""
    # v2 protocol does not deserialize body
    body = m.body if body is None else body
    return '{} ({}b)'.format(truncate(safe_repr(body), 1024),
                             len(m.body))


class Consumer:
    """Consumer blueprint."""

    Strategies = dict

    #: Optional callback called the first time the worker
    #: is ready to receive tasks.
    init_callback = None

    #: The current worker pool instance.
    pool = None

    #: A timer used for high-priority internal tasks, such
    #: as sending heartbeats.
    timer = None

    restart_count = -1  # first start is the same as a restart

    #: This flag will be turned off after the first failed
    #: connection attempt.
    first_connection_attempt = True

    class Blueprint(bootsteps.Blueprint):
        """Consumer blueprint."""

        name = 'Consumer'
        default_steps = [
            'celery.worker.consumer.connection:Connection',
            'celery.worker.consumer.mingle:Mingle',
            'celery.worker.consumer.events:Events',
            'celery.worker.consumer.gossip:Gossip',
            'celery.worker.consumer.heart:Heart',
            'celery.worker.consumer.control:Control',
            'celery.worker.consumer.tasks:Tasks',
            'celery.worker.consumer.consumer:Evloop',
            'celery.worker.consumer.agent:Agent',
        ]

        def shutdown(self, parent):
            self.send_all(parent, 'shutdown')

    def __init__(self, on_task_request,
                 init_callback=noop, hostname=None,
                 pool=None, app=None,
                 timer=None, controller=None, hub=None, amqheartbeat=None,
                 worker_options=None, disable_rate_limits=False,
                 initial_prefetch_count=2, prefetch_multiplier=1, **kwargs):
        self.app = app
        self.controller = controller
        self.init_callback = init_callback
        self.hostname = hostname or gethostname()
        self.pid = os.getpid()
        self.pool = pool
        self.timer = timer
        self.strategies = self.Strategies()
        self.conninfo = self.app.connection_for_read()
        self.connection_errors = self.conninfo.connection_errors
        self.channel_errors = self.conninfo.channel_errors
        self._restart_state = restart_state(maxR=5, maxT=1)

        self._does_info = logger.isEnabledFor(logging.INFO)
        self._limit_order = 0
        self.on_task_request = on_task_request
        self.on_task_message = set()
        self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate
        self.disable_rate_limits = disable_rate_limits
        self.initial_prefetch_count = initial_prefetch_count
        self.prefetch_multiplier = prefetch_multiplier
        self._maximum_prefetch_restored = True

        # this contains a tokenbucket for each task type by name, used for
        # rate limits, or None if rate limits are disabled for that task.
        self.task_buckets = defaultdict(lambda: None)
        self.reset_rate_limits()

        self.hub = hub
        if self.hub or getattr(self.pool, 'is_green', False):
            self.amqheartbeat = amqheartbeat
            if self.amqheartbeat is None:
                self.amqheartbeat = self.app.conf.broker_heartbeat
        else:
            self.amqheartbeat = 0
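        # Heartbeats can only be serviced on time when an asynchronous event
        # loop (hub) or a greenlet-based pool is driving the I/O, which is
        # why a blocking transport loop gets them disabled (amqheartbeat=0).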
        if not hasattr(self, 'loop'):
            self.loop = loops.asynloop if hub else loops.synloop

        if _detect_environment() == 'gevent':
            # there's a gevent bug that causes timeouts to not be reset,
            # so if the connection timeout is exceeded once, it can NEVER
            # connect again.
            self.app.conf.broker_connection_timeout = None

        self._pending_operations = []

        self.steps = []
        self.blueprint = self.Blueprint(
            steps=self.app.steps['consumer'],
            on_close=self.on_close,
        )
        self.blueprint.apply(self, **dict(worker_options or {}, **kwargs))

    def call_soon(self, p, *args, **kwargs):
        p = ppartial(p, *args, **kwargs)
        if self.hub:
            return self.hub.call_soon(p)
        self._pending_operations.append(p)
        return p

    def perform_pending_operations(self):
        if not self.hub:
            while self._pending_operations:
                try:
                    self._pending_operations.pop()()
                except Exception as exc:  # pylint: disable=broad-except
                    logger.exception('Pending callback raised: %r', exc)

    def bucket_for_task(self, type):
        limit = rate(getattr(type, 'rate_limit', None))
        return TokenBucket(limit, capacity=1) if limit else None

    def reset_rate_limits(self):
        self.task_buckets.update(
            (n, self.bucket_for_task(t)) for n, t in self.app.tasks.items()
        )

    def _update_prefetch_count(self, index=0):
        """Update prefetch count after pool grow/shrink operations.

        Index must be the change in number of processes as a positive
        (increasing) or negative (decreasing) number.

        Note:
            Currently pool grow operations will end up with an offset
            of +1 if the initial size of the pool was 0 (e.g.
            :option:`--autoscale=1,0 <celery worker --autoscale>`).
        """
        num_processes = self.pool.num_processes
        if not self.initial_prefetch_count or not num_processes:
            return  # prefetch disabled
        self.initial_prefetch_count = (
            self.pool.num_processes * self.prefetch_multiplier
        )
        return self._update_qos_eventually(index)

    def _update_qos_eventually(self, index):
        return (self.qos.decrement_eventually if index < 0
                else self.qos.increment_eventually)(
            abs(index) * self.prefetch_multiplier)

    def _limit_move_to_pool(self, request):
        task_reserved(request)
        self.on_task_request(request)

    def _schedule_bucket_request(self, bucket):
        while True:
            try:
                request, tokens = bucket.pop()
            except IndexError:
                # no request, break
                break

            if bucket.can_consume(tokens):
                self._limit_move_to_pool(request)
                continue
            else:
                # requeue to head, keep the order.
                bucket.contents.appendleft((request, tokens))
                pri = self._limit_order = (self._limit_order + 1) % 10
                hold = bucket.expected_time(tokens)
                self.timer.call_after(
                    hold, self._schedule_bucket_request, (bucket,),
                    priority=pri,
                )
                # no tokens, break
                break

    def _limit_task(self, request, bucket, tokens):
        bucket.add((request, tokens))
        return self._schedule_bucket_request(bucket)

    def _limit_post_eta(self, request, bucket, tokens):
        self.qos.decrement_eventually()
        bucket.add((request, tokens))
        return self._schedule_bucket_request(bucket)
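    # How the rate-limit helpers above fit together (illustrative note):
    # requests for tasks with a rate_limit are parked in the task's
    # TokenBucket and drained by _schedule_bucket_request.  A rate_limit of
    # '10/m' becomes a fill rate of 10/60 tokens per second, so once the
    # bucket is empty roughly one request every 6 seconds is moved to the
    # pool; everything else waits on the timer.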
    def start(self):
        blueprint = self.blueprint
        while blueprint.state not in STOP_CONDITIONS:
            maybe_shutdown()
            if self.restart_count:
                try:
                    self._restart_state.step()
                except RestartFreqExceeded as exc:
                    crit('Frequent restarts detected: %r', exc, exc_info=1)
                    sleep(1)
            self.restart_count += 1
            if self.app.conf.broker_channel_error_retry:
                recoverable_errors = (self.connection_errors +
                                      self.channel_errors)
            else:
                recoverable_errors = self.connection_errors
            try:
                blueprint.start(self)
            except recoverable_errors as exc:
                # If we're not retrying connections, we need to properly
                # shutdown or terminate the Celery main process instead of
                # abruptly aborting the process without any cleanup.
                is_connection_loss_on_startup = self.first_connection_attempt
                self.first_connection_attempt = False
                connection_retry_type = self._get_connection_retry_type(
                    is_connection_loss_on_startup)
                connection_retry = self.app.conf[connection_retry_type]
                if not connection_retry:
                    crit(
                        f"Retrying to {'establish' if is_connection_loss_on_startup else 're-establish'} "
                        f"a connection to the message broker after a connection loss has "
                        f"been disabled (app.conf.{connection_retry_type}=False). Shutting down..."
                    )
                    raise WorkerShutdown(1) from exc
                if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
                    crit("Too many open files. Aborting...")
                    raise WorkerTerminate(1) from exc
                maybe_shutdown()
                if blueprint.state not in STOP_CONDITIONS:
                    if self.connection:
                        self.on_connection_error_after_connected(exc)
                    else:
                        self.on_connection_error_before_connected(exc)
                    self.on_close()
                    blueprint.restart(self)

    def _get_connection_retry_type(self, is_connection_loss_on_startup):
        return ('broker_connection_retry_on_startup'
                if (is_connection_loss_on_startup
                    and self.app.conf.broker_connection_retry_on_startup is not None)
                else 'broker_connection_retry')

    def on_connection_error_before_connected(self, exc):
        error(CONNECTION_ERROR, self.conninfo.as_uri(), exc,
              'Trying to reconnect...')

    def on_connection_error_after_connected(self, exc):
        warn(CONNECTION_RETRY, exc_info=True)
        try:
            self.connection.collect()
        except Exception:  # pylint: disable=broad-except
            pass

        if self.app.conf.worker_cancel_long_running_tasks_on_connection_loss:
            for request in tuple(active_requests):
                if request.task.acks_late and not request.acknowledged:
                    warn(TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS,
                         request)
                    request.cancel(self.pool)
        else:
            warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning)

        if self.app.conf.worker_enable_prefetch_count_reduction:
            self.initial_prefetch_count = max(
                self.prefetch_multiplier,
                self.max_prefetch_count -
                len(tuple(active_requests)) * self.prefetch_multiplier
            )

            self._maximum_prefetch_restored = (
                self.initial_prefetch_count == self.max_prefetch_count)
            if not self._maximum_prefetch_restored:
                logger.info(
                    f"Temporarily reducing the prefetch count to {self.initial_prefetch_count} to avoid "
                    f"over-fetching since {len(tuple(active_requests))} tasks are currently being processed.\n"
                    f"The prefetch count will be gradually restored to {self.max_prefetch_count} as the tasks "
                    "complete processing."
                )
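    # Worked example of the prefetch reduction above (hypothetical numbers):
    # with 4 pool processes and prefetch_multiplier=4, max_prefetch_count is
    # 16.  If 3 tasks are still active when the connection drops, the new
    # prefetch count is max(4, 16 - 3 * 4) == 4, and it is restored step by
    # step as those tasks are acknowledged.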
    def register_with_event_loop(self, hub):
        self.blueprint.send_all(
            self, 'register_with_event_loop', args=(hub,),
            description='Hub.register',
        )

    def shutdown(self):
        self.blueprint.shutdown(self)

    def stop(self):
        self.blueprint.stop(self)

    def on_ready(self):
        callback, self.init_callback = self.init_callback, None
        if callback:
            callback(self)

    def loop_args(self):
        return (self, self.connection, self.task_consumer,
                self.blueprint, self.hub, self.qos, self.amqheartbeat,
                self.app.clock, self.amqheartbeat_rate)

    def on_decode_error(self, message, exc):
        """Callback called if an error occurs while decoding a message.

        Simply logs the error and acknowledges the message so it
        doesn't enter a loop.

        Arguments:
            message (kombu.Message): The message received.
            exc (Exception): The exception being handled.
        """
        crit(MESSAGE_DECODE_ERROR,
             exc, message.content_type, message.content_encoding,
             safe_repr(message.headers), dump_body(message, message.body),
             exc_info=1)
        message.ack()

    def on_close(self):
        # Clear internal queues to get rid of old messages.
        # They can't be acked anyway, as a delivery tag is specific
        # to the current channel.
        if self.controller and self.controller.semaphore:
            self.controller.semaphore.clear()
        if self.timer:
            self.timer.clear()
        for bucket in self.task_buckets.values():
            if bucket:
                bucket.clear_pending()
        for request_id in reserved_requests:
            if request_id in requests:
                del requests[request_id]
        reserved_requests.clear()
        if self.pool and self.pool.flush:
            self.pool.flush()

    def connect(self):
        """Establish the broker connection used for consuming tasks.

        Retries establishing the connection if the
        :setting:`broker_connection_retry` setting is enabled.
        """
        conn = self.connection_for_read(heartbeat=self.amqheartbeat)
        if self.hub:
            conn.transport.register_with_event_loop(conn.connection, self.hub)
        return conn

    def connection_for_read(self, heartbeat=None):
        return self.ensure_connected(
            self.app.connection_for_read(heartbeat=heartbeat))

    def connection_for_write(self, heartbeat=None):
        return self.ensure_connected(
            self.app.connection_for_write(heartbeat=heartbeat))

    def ensure_connected(self, conn):
        # Callback called for each retry while the connection
        # can't be established.
        def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
            if getattr(conn, 'alt', None) and interval == 0:
                next_step = CONNECTION_FAILOVER
            next_step = next_step.format(
                when=humanize_seconds(interval, 'in', ' '),
                retries=int(interval / 2),
                max_retries=self.app.conf.broker_connection_max_retries)
            error(CONNECTION_ERROR, conn.as_uri(), exc, next_step)

        # Remember that the connection is lazy, it won't establish
        # until needed.

        # TODO: Rely only on broker_connection_retry_on_startup to determine
        #       whether connection retries are disabled.
        #       We will make the switch in Celery 6.0.
        retry_disabled = False

        if self.app.conf.broker_connection_retry_on_startup is None:
            # If broker_connection_retry_on_startup is not set, revert to
            # broker_connection_retry to determine whether connection
            # retries are disabled.
            retry_disabled = not self.app.conf.broker_connection_retry

            warnings.warn(
                CPendingDeprecationWarning(
                    f"The broker_connection_retry configuration setting will no longer determine\n"
                    f"whether broker connection retries are made during startup in Celery 6.0 and above.\n"
                    f"If you wish to retain the existing behavior for retrying connections on startup,\n"
                    f"you should set broker_connection_retry_on_startup to {self.app.conf.broker_connection_retry}.")
            )
        else:
            if self.first_connection_attempt:
                retry_disabled = (
                    not self.app.conf.broker_connection_retry_on_startup)
            else:
                retry_disabled = not self.app.conf.broker_connection_retry

        if retry_disabled:
            # Retry disabled, just call connect directly.
            conn.connect()
            self.first_connection_attempt = False
            return conn

        conn = conn.ensure_connection(
            _error_handler, self.app.conf.broker_connection_max_retries,
            callback=maybe_shutdown,
        )
        self.first_connection_attempt = False
        return conn
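    # Precedence in ensure_connected: on the first connection attempt,
    # broker_connection_retry_on_startup wins when it is explicitly set;
    # afterwards (and whenever it is left unset) broker_connection_retry
    # decides, with the deprecation warning covering the unset case.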
    def _flush_events(self):
        if self.event_dispatcher:
            self.event_dispatcher.flush()

    def on_send_event_buffered(self):
        if self.hub:
            self.hub._ready.add(self._flush_events)

    def add_task_queue(self, queue, exchange=None, exchange_type=None,
                       routing_key=None, **options):
        cset = self.task_consumer
        queues = self.app.amqp.queues
        # Must use 'in' here, as __missing__ will automatically
        # create queues when :setting:`task_create_missing_queues` is
        # enabled.
        # (Issue #1079)
        if queue in queues:
            q = queues[queue]
        else:
            exchange = queue if exchange is None else exchange
            exchange_type = ('direct' if exchange_type is None
                             else exchange_type)
            q = queues.select_add(queue,
                                  exchange=exchange,
                                  exchange_type=exchange_type,
                                  routing_key=routing_key, **options)
        if not cset.consuming_from(queue):
            cset.add_queue(q)
            cset.consume()
            info('Started consuming from %s', queue)

    def cancel_task_queue(self, queue):
        info('Canceling queue %s', queue)
        self.app.amqp.queues.deselect(queue)
        self.task_consumer.cancel_by_queue(queue)

    def apply_eta_task(self, task):
        """Method called by the timer to apply a task with an ETA/countdown."""
        task_reserved(task)
        self.on_task_request(task)
        self.qos.decrement_eventually()

    def _message_report(self, body, message):
        return MESSAGE_REPORT.format(dump_body(message, body),
                                     safe_repr(message.content_type),
                                     safe_repr(message.content_encoding),
                                     safe_repr(message.delivery_info),
                                     safe_repr(message.headers))

    def on_unknown_message(self, body, message):
        warn(UNKNOWN_FORMAT, self._message_report(body, message))
        message.reject_log_error(logger, self.connection_errors)
        signals.task_rejected.send(sender=self, message=message, exc=None)

    def on_unknown_task(self, body, message, exc):
        error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body),
              message.headers, message.delivery_info, exc_info=True)
        try:
            id_, name = message.headers['id'], message.headers['task']
            root_id = message.headers.get('root_id')
        except KeyError:  # proto1
            payload = message.payload
            id_, name = payload['id'], payload['task']
            root_id = None
        request = Bunch(
            name=name, chord=None, root_id=root_id,
            correlation_id=message.properties.get('correlation_id'),
            reply_to=message.properties.get('reply_to'),
            errbacks=None,
        )
        message.reject_log_error(logger, self.connection_errors)
        self.app.backend.mark_as_failure(
            id_, NotRegistered(name), request=request,
        )
        if self.event_dispatcher:
            self.event_dispatcher.send(
                'task-failed', uuid=id_,
                exception=f'NotRegistered({name!r})',
            )
        signals.task_unknown.send(
            sender=self, message=message, exc=exc,
            name=name, id=id_,
        )

    def on_invalid_task(self, body, message, exc):
        error(INVALID_TASK_ERROR, exc, dump_body(message, body),
              exc_info=True)
        message.reject_log_error(logger, self.connection_errors)
        signals.task_rejected.send(sender=self, message=message, exc=exc)

    def update_strategies(self):
        loader = self.app.loader
        for name, task in self.app.tasks.items():
            self.strategies[name] = task.start_strategy(self.app, self)
            task.__trace__ = build_tracer(name, task, loader,
                                          self.hostname, app=self.app)
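    # create_task_handler binds the hot attributes to closure locals below,
    # presumably so on_task_received avoids repeated attribute lookups on
    # self for every delivered message.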
    def create_task_handler(self, promise=promise):
        strategies = self.strategies
        on_unknown_message = self.on_unknown_message
        on_unknown_task = self.on_unknown_task
        on_invalid_task = self.on_invalid_task
        callbacks = self.on_task_message
        call_soon = self.call_soon

        def on_task_received(message):
            # payload will only be set for v1 protocol, since v2
            # will defer deserializing the message body to the pool.
            payload = None
            try:
                type_ = message.headers['task']                # protocol v2
            except TypeError:
                return on_unknown_message(None, message)
            except KeyError:
                try:
                    payload = message.decode()
                except Exception as exc:  # pylint: disable=broad-except
                    return self.on_decode_error(message, exc)
                try:
                    type_, payload = payload['task'], payload  # protocol v1
                except (TypeError, KeyError):
                    return on_unknown_message(payload, message)
            try:
                strategy = strategies[type_]
            except KeyError as exc:
                return on_unknown_task(None, message, exc)
            else:
                try:
                    ack_log_error_promise = promise(
                        call_soon, (message.ack_log_error,),
                        on_error=self._restore_prefetch_count_after_connection_restart,
                    )
                    reject_log_error_promise = promise(
                        call_soon, (message.reject_log_error,),
                        on_error=self._restore_prefetch_count_after_connection_restart,
                    )

                    if (
                        not self._maximum_prefetch_restored
                        and self.restart_count > 0
                        and self._new_prefetch_count <= self.max_prefetch_count
                    ):
                        ack_log_error_promise.then(
                            self._restore_prefetch_count_after_connection_restart,
                            on_error=self._restore_prefetch_count_after_connection_restart)
                        reject_log_error_promise.then(
                            self._restore_prefetch_count_after_connection_restart,
                            on_error=self._restore_prefetch_count_after_connection_restart)

                    strategy(
                        message, payload,
                        ack_log_error_promise,
                        reject_log_error_promise,
                        callbacks,
                    )
                except (InvalidTaskError, ContentDisallowed) as exc:
                    return on_invalid_task(payload, message, exc)
                except DecodeError as exc:
                    return self.on_decode_error(message, exc)

        return on_task_received

    def _restore_prefetch_count_after_connection_restart(self, p, *args):
        with self.qos._mutex:
            if any((
                not self.app.conf.worker_enable_prefetch_count_reduction,
                self._maximum_prefetch_restored,
            )):
                return

            new_prefetch_count = min(
                self.max_prefetch_count, self._new_prefetch_count)
            self.qos.value = self.initial_prefetch_count = new_prefetch_count
            self.qos.set(self.qos.value)

            already_restored = self._maximum_prefetch_restored
            self._maximum_prefetch_restored = (
                new_prefetch_count == self.max_prefetch_count)

            if already_restored is False and self._maximum_prefetch_restored is True:
                logger.info(
                    "Resuming normal operations following a restart.\n"
                    f"Prefetch count has been restored to the maximum of {self.max_prefetch_count}"
                )

    @property
    def max_prefetch_count(self):
        return self.pool.num_processes * self.prefetch_multiplier

    @property
    def _new_prefetch_count(self):
        return self.qos.value + self.prefetch_multiplier

    def __repr__(self):
        """``repr(self)``."""
        return '<Consumer: {self.hostname} ({state})>'.format(
            self=self, state=self.blueprint.human_state(),
        )


class Evloop(bootsteps.StartStopStep):
    """Event loop service.

    Note:
        This is always started last.
    """

    label = 'event loop'
    last = True

    def start(self, c):
        self.patch_all(c)
        c.loop(*c.loop_args())

    def patch_all(self, c):
        c.qos._mutex = DummyLock()
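

# Illustrative sketch, not part of the upstream module: how a task's
# rate_limit string maps onto a kombu TokenBucket, mirroring
# Consumer.bucket_for_task above.  DemoTask is a hypothetical stand-in
# for a registered task object.
if __name__ == '__main__':
    class DemoTask:
        rate_limit = '10/m'  # at most ten tasks per minute

    limit = rate(getattr(DemoTask, 'rate_limit', None))  # '10/m' -> 10/60 tokens/s
    bucket = TokenBucket(limit, capacity=1) if limit else None
    print(bucket.can_consume(1))    # True: a fresh bucket starts full
    print(bucket.can_consume(1))    # False: the next token takes ~6 seconds
    print(bucket.expected_time(1))  # estimated seconds until a token is free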