"""GCP Pub/Sub transport module for kombu. More information about GCP Pub/Sub: https://cloud.google.com/pubsub Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: No * Supports Fanout: Yes * Supports Priority: No * Supports TTL: No Connection String ================= Connection string has the following formats: .. code-block:: gcpubsub://projects/project-name Transport Options ================= * ``queue_name_prefix``: (str) Prefix for queue names. * ``ack_deadline_seconds``: (int) The maximum time after receiving a message and acknowledging it before pub/sub redelivers the message. * ``expiration_seconds``: (int) Subscriptions without any subscriber activity or changes made to their properties are removed after this period. Examples of subscriber activities include open connections, active pulls, or successful pushes. * ``wait_time_seconds``: (int) The maximum time to wait for new messages. Defaults to 10. * ``retry_timeout_seconds``: (int) The maximum time to wait before retrying. * ``bulk_max_messages``: (int) The maximum number of messages to pull in bulk. Defaults to 32. """ from __future__ import annotations import dataclasses import datetime import string import threading from concurrent.futures import (FIRST_COMPLETED, Future, ThreadPoolExecutor, wait) from contextlib import suppress from os import getpid from queue import Empty from threading import Lock from time import monotonic, sleep from uuid import NAMESPACE_OID, uuid3 from _socket import gethostname from _socket import timeout as socket_timeout from google.api_core.exceptions import (AlreadyExists, DeadlineExceeded, PermissionDenied) from google.api_core.retry import Retry from google.cloud import monitoring_v3 from google.cloud.monitoring_v3 import query from google.cloud.pubsub_v1 import PublisherClient, SubscriberClient from google.cloud.pubsub_v1 import exceptions as pubsub_exceptions from google.cloud.pubsub_v1.publisher import exceptions as publisher_exceptions from google.cloud.pubsub_v1.subscriber import \ exceptions as subscriber_exceptions from google.pubsub_v1 import gapic_version as package_version from kombu.entity import TRANSIENT_DELIVERY_MODE from kombu.log import get_logger from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual logger = get_logger('kombu.transport.gcpubsub') # dots are replaced by dash, all other punctuation replaced by underscore. PUNCTUATIONS_TO_REPLACE = set(string.punctuation) - {'_', '.', '-'} CHARS_REPLACE_TABLE = { ord('.'): ord('-'), **{ord(c): ord('_') for c in PUNCTUATIONS_TO_REPLACE}, } class UnackedIds: """Threadsafe list of ack_ids.""" def __init__(self): self._list = [] self._lock = Lock() def append(self, val): # append is atomic self._list.append(val) def extend(self, vals: list): # extend is atomic self._list.extend(vals) def pop(self, index=-1): with self._lock: return self._list.pop(index) def remove(self, val): with self._lock, suppress(ValueError): self._list.remove(val) def __len__(self): with self._lock: return len(self._list) def __getitem__(self, item): # getitem is atomic return self._list[item] class AtomicCounter: """Threadsafe counter. Returns the value after inc/dec operations. 
""" def __init__(self, initial=0): self._value = initial self._lock = Lock() def inc(self, n=1): with self._lock: self._value += n return self._value def dec(self, n=1): with self._lock: self._value -= n return self._value def get(self): with self._lock: return self._value @dataclasses.dataclass class QueueDescriptor: """Pub/Sub queue descriptor.""" name: str topic_path: str # projects/{project_id}/topics/{topic_id} subscription_id: str subscription_path: str # projects/{project_id}/subscriptions/{subscription_id} unacked_ids: UnackedIds = dataclasses.field(default_factory=UnackedIds) class Channel(virtual.Channel): """GCP Pub/Sub channel.""" supports_fanout = True do_restore = False # pub/sub does that for us default_wait_time_seconds = 10 default_ack_deadline_seconds = 240 default_expiration_seconds = 86400 default_retry_timeout_seconds = 300 default_bulk_max_messages = 32 _min_ack_deadline = 10 _fanout_exchanges = set() _unacked_extender: threading.Thread = None _stop_extender = threading.Event() _n_channels = AtomicCounter() _queue_cache: dict[str, QueueDescriptor] = {} _tmp_subscriptions: set[str] = set() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.pool = ThreadPoolExecutor() logger.info('new GCP pub/sub channel: %s', self.conninfo.hostname) self.project_id = Transport.parse_uri(self.conninfo.hostname) if self._n_channels.inc() == 1: Channel._unacked_extender = threading.Thread( target=self._extend_unacked_deadline, daemon=True, ) self._stop_extender.clear() Channel._unacked_extender.start() def entity_name(self, name: str, table=CHARS_REPLACE_TABLE) -> str: """Format AMQP queue name into a valid Pub/Sub queue name.""" if not name.startswith(self.queue_name_prefix): name = self.queue_name_prefix + name return str(safe_str(name)).translate(table) def _queue_bind(self, exchange, routing_key, pattern, queue): exchange_type = self.typeof(exchange).type queue = self.entity_name(queue) logger.debug( 'binding queue: %s to %s exchange: %s with routing_key: %s', queue, exchange_type, exchange, routing_key, ) filter_args = {} if exchange_type == 'direct': # Direct exchange is implemented as a single subscription # E.g. for exchange 'test_direct': # -topic:'test_direct' # -bound queue:'direct1': # -subscription: direct1' on topic 'test_direct' # -filter:routing_key' filter_args = { 'filter': f'attributes.routing_key="{routing_key}"' } subscription_path = self.subscriber.subscription_path( self.project_id, queue ) message_retention_duration = self.expiration_seconds elif exchange_type == 'fanout': # Fanout exchange is implemented as a separate subscription. # E.g. 
class UnackedIds:
    """Threadsafe list of ack_ids."""

    def __init__(self):
        self._list = []
        self._lock = Lock()

    def append(self, val):
        # append is atomic
        self._list.append(val)

    def extend(self, vals: list):
        # extend is atomic
        self._list.extend(vals)

    def pop(self, index=-1):
        with self._lock:
            return self._list.pop(index)

    def remove(self, val):
        with self._lock, suppress(ValueError):
            self._list.remove(val)

    def __len__(self):
        with self._lock:
            return len(self._list)

    def __getitem__(self, item):
        # getitem is atomic
        return self._list[item]


class AtomicCounter:
    """Threadsafe counter.

    Returns the value after inc/dec operations.
    """

    def __init__(self, initial=0):
        self._value = initial
        self._lock = Lock()

    def inc(self, n=1):
        with self._lock:
            self._value += n
            return self._value

    def dec(self, n=1):
        with self._lock:
            self._value -= n
            return self._value

    def get(self):
        with self._lock:
            return self._value


@dataclasses.dataclass
class QueueDescriptor:
    """Pub/Sub queue descriptor."""

    name: str
    topic_path: str  # projects/{project_id}/topics/{topic_id}
    subscription_id: str
    subscription_path: str  # projects/{project_id}/subscriptions/{subscription_id}
    unacked_ids: UnackedIds = dataclasses.field(default_factory=UnackedIds)


class Channel(virtual.Channel):
    """GCP Pub/Sub channel."""

    supports_fanout = True
    do_restore = False  # pub/sub does that for us
    default_wait_time_seconds = 10
    default_ack_deadline_seconds = 240
    default_expiration_seconds = 86400
    default_retry_timeout_seconds = 300
    default_bulk_max_messages = 32

    _min_ack_deadline = 10
    _fanout_exchanges = set()
    _unacked_extender: threading.Thread = None
    _stop_extender = threading.Event()
    _n_channels = AtomicCounter()
    _queue_cache: dict[str, QueueDescriptor] = {}
    _tmp_subscriptions: set[str] = set()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pool = ThreadPoolExecutor()
        logger.info('new GCP pub/sub channel: %s', self.conninfo.hostname)
        self.project_id = Transport.parse_uri(self.conninfo.hostname)
        if self._n_channels.inc() == 1:
            Channel._unacked_extender = threading.Thread(
                target=self._extend_unacked_deadline,
                daemon=True,
            )
            self._stop_extender.clear()
            Channel._unacked_extender.start()

    def entity_name(self, name: str, table=CHARS_REPLACE_TABLE) -> str:
        """Format AMQP queue name into a valid Pub/Sub queue name."""
        if not name.startswith(self.queue_name_prefix):
            name = self.queue_name_prefix + name
        return str(safe_str(name)).translate(table)
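    # For example (illustrative only): with the default 'kombu-' prefix,
    # entity_name('celery.pidbox') returns 'kombu-celery-pidbox': the prefix
    # is prepended, then CHARS_REPLACE_TABLE maps the '.' to '-'.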
    def _queue_bind(self, exchange, routing_key, pattern, queue):
        exchange_type = self.typeof(exchange).type
        queue = self.entity_name(queue)
        logger.debug(
            'binding queue: %s to %s exchange: %s with routing_key: %s',
            queue,
            exchange_type,
            exchange,
            routing_key,
        )
        filter_args = {}
        if exchange_type == 'direct':
            # Direct exchange is implemented as a single subscription.
            # E.g. for exchange 'test_direct':
            # -topic: 'test_direct'
            # -bound queue 'direct1':
            #   -subscription: 'direct1' on topic 'test_direct'
            #     -filter: routing_key
            filter_args = {
                'filter': f'attributes.routing_key="{routing_key}"'
            }
            subscription_path = self.subscriber.subscription_path(
                self.project_id, queue
            )
            message_retention_duration = self.expiration_seconds
        elif exchange_type == 'fanout':
            # Fanout exchange is implemented as a separate subscription
            # per queue. E.g. for exchange 'test_fanout':
            # -topic: 'test_fanout'
            # -bound queue 'fanout1':
            #   -subscription: 'fanout1-uuid' on topic 'test_fanout'
            # -bound queue 'fanout2':
            #   -subscription: 'fanout2-uuid' on topic 'test_fanout'
            uid = f'{uuid3(NAMESPACE_OID, f"{gethostname()}.{getpid()}")}'
            uniq_sub_name = f'{queue}-{uid}'
            subscription_path = self.subscriber.subscription_path(
                self.project_id, uniq_sub_name
            )
            self._tmp_subscriptions.add(subscription_path)
            self._fanout_exchanges.add(exchange)
            message_retention_duration = 600
        else:
            raise NotImplementedError(
                f'exchange type {exchange_type} not implemented'
            )
        exchange_topic = self._create_topic(
            self.project_id, exchange, message_retention_duration
        )
        self._create_subscription(
            topic_path=exchange_topic,
            subscription_path=subscription_path,
            filter_args=filter_args,
            msg_retention=message_retention_duration,
        )
        qdesc = QueueDescriptor(
            name=queue,
            topic_path=exchange_topic,
            subscription_id=queue,
            subscription_path=subscription_path,
        )
        self._queue_cache[queue] = qdesc
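    # Illustration (hedged sketch): binding queue 'direct1' to direct
    # exchange 'test_direct' with routing_key 'hi' creates subscription
    # 'direct1' on topic 'test_direct' with the server-side filter
    #     attributes.routing_key="hi"
    # _put() publishes routing_key as a message attribute, so Pub/Sub itself
    # drops non-matching messages for direct exchanges.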
    def _create_topic(
        self,
        project_id: str,
        topic_id: str,
        message_retention_duration: int = None,
    ) -> str:
        topic_path = self.publisher.topic_path(project_id, topic_id)
        if self._is_topic_exists(topic_path):
            # topic creation takes a while, so skip if possible
            logger.debug('topic: %s exists', topic_path)
            return topic_path
        try:
            logger.debug('creating topic: %s', topic_path)
            request = {'name': topic_path}
            if message_retention_duration:
                request[
                    'message_retention_duration'
                ] = f'{message_retention_duration}s'
            self.publisher.create_topic(request=request)
        except AlreadyExists:
            pass
        return topic_path

    def _is_topic_exists(self, topic_path: str) -> bool:
        topics = self.publisher.list_topics(
            request={"project": f'projects/{self.project_id}'}
        )
        for t in topics:
            if t.name == topic_path:
                return True
        return False

    def _create_subscription(
        self,
        project_id: str = None,
        topic_id: str = None,
        topic_path: str = None,
        subscription_path: str = None,
        filter_args=None,
        msg_retention: int = None,
    ) -> str:
        subscription_path = (
            subscription_path
            or self.subscriber.subscription_path(self.project_id, topic_id)
        )
        topic_path = topic_path or self.publisher.topic_path(
            project_id, topic_id
        )
        try:
            logger.debug(
                'creating subscription: %s, topic: %s, filter: %s',
                subscription_path,
                topic_path,
                filter_args,
            )
            msg_retention = msg_retention or self.expiration_seconds
            self.subscriber.create_subscription(
                request={
                    "name": subscription_path,
                    "topic": topic_path,
                    'ack_deadline_seconds': self.ack_deadline_seconds,
                    'expiration_policy': {
                        'ttl': f'{self.expiration_seconds}s'
                    },
                    'message_retention_duration': f'{msg_retention}s',
                    **(filter_args or {}),
                }
            )
        except AlreadyExists:
            pass
        return subscription_path

    def _delete(self, queue, *args, **kwargs):
        """Delete a queue by name."""
        queue = self.entity_name(queue)
        logger.info('deleting queue: %s', queue)
        qdesc = self._queue_cache.get(queue)
        if not qdesc:
            return
        self.subscriber.delete_subscription(
            request={"subscription": qdesc.subscription_path}
        )
        self._queue_cache.pop(queue, None)

    def _put(self, queue, message, **kwargs):
        """Put a message onto the queue."""
        queue = self.entity_name(queue)
        qdesc = self._queue_cache[queue]
        routing_key = self._get_routing_key(message)
        logger.debug(
            'putting message to queue: %s, topic: %s, routing_key: %s',
            queue,
            qdesc.topic_path,
            routing_key,
        )
        encoded_message = dumps(message)
        self.publisher.publish(
            qdesc.topic_path,
            encoded_message.encode("utf-8"),
            routing_key=routing_key,
        )

    def _put_fanout(self, exchange, message, routing_key, **kwargs):
        """Put a message onto fanout exchange."""
        self._lookup(exchange, routing_key)
        topic_path = self.publisher.topic_path(self.project_id, exchange)
        logger.debug(
            'putting msg to fanout exchange: %s, topic: %s',
            exchange,
            topic_path,
        )
        encoded_message = dumps(message)
        self.publisher.publish(
            topic_path,
            encoded_message.encode("utf-8"),
            retry=Retry(deadline=self.retry_timeout_seconds),
        )

    def _get(self, queue: str, timeout: float = None):
        """Retrieves a single message from a queue."""
        queue = self.entity_name(queue)
        qdesc = self._queue_cache[queue]
        try:
            response = self.subscriber.pull(
                request={
                    'subscription': qdesc.subscription_path,
                    'max_messages': 1,
                },
                retry=Retry(deadline=self.retry_timeout_seconds),
                timeout=timeout or self.wait_time_seconds,
            )
        except DeadlineExceeded:
            raise Empty()

        if len(response.received_messages) == 0:
            raise Empty()

        message = response.received_messages[0]
        ack_id = message.ack_id
        payload = loads(message.message.data)
        delivery_info = payload['properties']['delivery_info']
        logger.debug(
            'queue:%s got message, ack_id: %s, payload: %s',
            queue,
            ack_id,
            payload['properties'],
        )
        if self._is_auto_ack(payload['properties']):
            logger.debug('auto acking message ack_id: %s', ack_id)
            self._do_ack([ack_id], qdesc.subscription_path)
        else:
            delivery_info['gcpubsub_message'] = {
                'queue': queue,
                'ack_id': ack_id,
                'message_id': message.message.message_id,
                'subscription_path': qdesc.subscription_path,
            }
            qdesc.unacked_ids.append(ack_id)
        return payload

    def _is_auto_ack(self, payload_properties: dict):
        exchange = payload_properties['delivery_info']['exchange']
        delivery_mode = payload_properties['delivery_mode']
        return (
            delivery_mode == TRANSIENT_DELIVERY_MODE
            or exchange in self._fanout_exchanges
        )

    def _get_bulk(self, queue: str, timeout: float):
        """Retrieves bulk of messages from a queue."""
        prefixed_queue = self.entity_name(queue)
        qdesc = self._queue_cache[prefixed_queue]
        max_messages = self._get_max_messages_estimate()
        if not max_messages:
            raise Empty()
        try:
            response = self.subscriber.pull(
                request={
                    'subscription': qdesc.subscription_path,
                    'max_messages': max_messages,
                },
                retry=Retry(deadline=self.retry_timeout_seconds),
                timeout=timeout or self.wait_time_seconds,
            )
        except DeadlineExceeded:
            raise Empty()

        received_messages = response.received_messages
        if len(received_messages) == 0:
            raise Empty()

        auto_ack_ids = []
        ret_payloads = []

        logger.debug(
            'batching %d messages from queue: %s',
            len(received_messages),
            prefixed_queue,
        )
        for message in received_messages:
            ack_id = message.ack_id
            payload = loads(bytes_to_str(message.message.data))
            delivery_info = payload['properties']['delivery_info']
            delivery_info['gcpubsub_message'] = {
                'queue': prefixed_queue,
                'ack_id': ack_id,
                'message_id': message.message.message_id,
                'subscription_path': qdesc.subscription_path,
            }
            if self._is_auto_ack(payload['properties']):
                auto_ack_ids.append(ack_id)
            else:
                qdesc.unacked_ids.append(ack_id)
            ret_payloads.append(payload)
        if auto_ack_ids:
            logger.debug('auto acking ack_ids: %s', auto_ack_ids)
            self._do_ack(auto_ack_ids, qdesc.subscription_path)

        return queue, ret_payloads

    def _get_max_messages_estimate(self) -> int:
        max_allowed = self.qos.can_consume_max_estimate()
        max_if_unlimited = self.bulk_max_messages
        return max_if_unlimited if max_allowed is None else max_allowed

    def _lookup(self, exchange, routing_key, default=None):
        exchange_info = self.state.exchanges.get(exchange, {})
        if not exchange_info:
            return super()._lookup(exchange, routing_key, default)
        ret = self.typeof(exchange).lookup(
            self.get_table(exchange),
            exchange,
            routing_key,
            default,
        )
        if ret:
            return ret
        logger.debug(
            'no queues bound to exchange: %s, binding on the fly',
            exchange,
        )
        self.queue_bind(exchange, exchange, routing_key)
        return [exchange]

    def _size(self, queue: str) -> int:
        """Return the number of messages in a queue.

        This is a *rough* estimation, as Pub/Sub doesn't provide an exact
        API.
        """
        queue = self.entity_name(queue)
        if queue not in self._queue_cache:
            return 0
        qdesc = self._queue_cache[queue]
        result = query.Query(
            self.monitor,
            self.project_id,
            'pubsub.googleapis.com/subscription/num_undelivered_messages',
            end_time=datetime.datetime.now(),
            minutes=1,
        ).select_resources(subscription_id=qdesc.subscription_id)

        # monitoring API requires the caller to have the monitoring.viewer
        # role. Since we can live without the exact number of messages
        # in the queue, we can ignore the exception and allow users to
        # use the transport without this role.
        with suppress(PermissionDenied):
            return sum(
                content.points[0].value.int64_value for content in result
            )
        return -1

    def basic_ack(self, delivery_tag, multiple=False):
        """Acknowledge one message."""
        if multiple:
            raise NotImplementedError('multiple acks not implemented')

        delivery_info = self.qos.get(delivery_tag).delivery_info
        pubsub_message = delivery_info['gcpubsub_message']
        ack_id = pubsub_message['ack_id']
        queue = pubsub_message['queue']
        logger.debug('ack message. queue: %s ack_id: %s', queue, ack_id)
        subscription_path = pubsub_message['subscription_path']
        self._do_ack([ack_id], subscription_path)
        qdesc = self._queue_cache[queue]
        qdesc.unacked_ids.remove(ack_id)
        super().basic_ack(delivery_tag)

    def _do_ack(self, ack_ids: list[str], subscription_path: str):
        self.subscriber.acknowledge(
            request={"subscription": subscription_path, "ack_ids": ack_ids},
            retry=Retry(deadline=self.retry_timeout_seconds),
        )

    def _purge(self, queue: str):
        """Delete all current messages in a queue."""
        queue = self.entity_name(queue)
        qdesc = self._queue_cache.get(queue)
        if not qdesc:
            return

        n = self._size(queue)
        self.subscriber.seek(
            request={
                "subscription": qdesc.subscription_path,
                "time": datetime.datetime.now(),
            }
        )
        return n

    def _extend_unacked_deadline(self):
        thread_id = threading.get_native_id()
        logger.info(
            'unacked deadline extension thread: [%s] started',
            thread_id,
        )
        min_deadline_sleep = self._min_ack_deadline / 2
        sleep_time = max(min_deadline_sleep, self.ack_deadline_seconds / 4)
        while not self._stop_extender.wait(sleep_time):
            for qdesc in self._queue_cache.values():
                if len(qdesc.unacked_ids) == 0:
                    logger.debug(
                        'thread [%s]: no unacked messages for %s',
                        thread_id,
                        qdesc.subscription_path,
                    )
                    continue
                logger.debug(
                    'thread [%s]: extend ack deadline for %s: %d msgs [%s]',
                    thread_id,
                    qdesc.subscription_path,
                    len(qdesc.unacked_ids),
                    list(qdesc.unacked_ids),
                )
                self.subscriber.modify_ack_deadline(
                    request={
                        "subscription": qdesc.subscription_path,
                        "ack_ids": list(qdesc.unacked_ids),
                        "ack_deadline_seconds": self.ack_deadline_seconds,
                    }
                )
        logger.info(
            'unacked deadline extension thread [%s] stopped', thread_id
        )
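    # Worked example (assuming the class defaults above): with
    # ack_deadline_seconds=240 and _min_ack_deadline=10, sleep_time is
    # max(10 / 2, 240 / 4) = 60, so the extender thread wakes every 60s
    # and resets each unacked message's deadline back to 240s.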
    def after_reply_message_received(self, queue: str):
        queue = self.entity_name(queue)
        sub = self.subscriber.subscription_path(self.project_id, queue)
        logger.debug(
            'after_reply_message_received: queue: %s, sub: %s', queue, sub
        )
        self._tmp_subscriptions.add(sub)

    @cached_property
    def subscriber(self):
        return SubscriberClient()

    @cached_property
    def publisher(self):
        return PublisherClient()

    @cached_property
    def monitor(self):
        return monitoring_v3.MetricServiceClient()

    @property
    def conninfo(self):
        return self.connection.client

    @property
    def transport_options(self):
        return self.connection.client.transport_options

    @cached_property
    def wait_time_seconds(self):
        return self.transport_options.get(
            'wait_time_seconds', self.default_wait_time_seconds
        )

    @cached_property
    def retry_timeout_seconds(self):
        return self.transport_options.get(
            'retry_timeout_seconds', self.default_retry_timeout_seconds
        )

    @cached_property
    def ack_deadline_seconds(self):
        return self.transport_options.get(
            'ack_deadline_seconds', self.default_ack_deadline_seconds
        )

    @cached_property
    def queue_name_prefix(self):
        return self.transport_options.get('queue_name_prefix', 'kombu-')

    @cached_property
    def expiration_seconds(self):
        return self.transport_options.get(
            'expiration_seconds', self.default_expiration_seconds
        )

    @cached_property
    def bulk_max_messages(self):
        return self.transport_options.get(
            'bulk_max_messages', self.default_bulk_max_messages
        )

    def close(self):
        """Close the channel."""
        logger.debug('closing channel')
        while self._tmp_subscriptions:
            sub = self._tmp_subscriptions.pop()
            with suppress(Exception):
                logger.debug('deleting subscription: %s', sub)
                self.subscriber.delete_subscription(
                    request={"subscription": sub}
                )
        if not self._n_channels.dec():
            self._stop_extender.set()
            Channel._unacked_extender.join()
        super().close()

    @staticmethod
    def _get_routing_key(message):
        routing_key = (
            message['properties']
            .get('delivery_info', {})
            .get('routing_key', '')
        )
        return routing_key


class Transport(virtual.Transport):
    """GCP Pub/Sub transport."""

    Channel = Channel

    can_parse_url = True
    polling_interval = 0.1
    connection_errors = virtual.Transport.connection_errors + (
        pubsub_exceptions.TimeoutError,
    )
    channel_errors = (
        virtual.Transport.channel_errors
        + (
            publisher_exceptions.FlowControlLimitError,
            publisher_exceptions.MessageTooLargeError,
            publisher_exceptions.PublishError,
            publisher_exceptions.TimeoutError,
            publisher_exceptions.PublishToPausedOrderingKeyException,
        )
        + (subscriber_exceptions.AcknowledgeError,)
    )
    driver_type = 'gcpubsub'
    driver_name = 'pubsub_v1'

    implements = virtual.Transport.implements.extend(
        exchange_type=frozenset(['direct', 'fanout']),
    )

    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self._pool = ThreadPoolExecutor()
        self._get_bulk_future_to_queue: dict[Future, str] = dict()

    def driver_version(self):
        return package_version.__version__

    @staticmethod
    def parse_uri(uri: str) -> str:
        # URL like:
        # gcpubsub://projects/project-name
        project = uri.split('gcpubsub://projects/')[1]
        return project.strip('/')

    @classmethod
    def as_uri(cls, uri: str, include_password=False, mask='**') -> str:
        return uri or 'gcpubsub://'
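    # For example (illustrative only):
    #
    #     >>> Transport.parse_uri('gcpubsub://projects/my-project')
    #     'my-project'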
    def drain_events(self, connection, timeout=None):
        time_start = monotonic()
        polling_interval = self.polling_interval
        if timeout and polling_interval and polling_interval > timeout:
            polling_interval = timeout
        while 1:
            try:
                self._drain_from_active_queues(timeout=timeout)
            except Empty:
                if timeout and monotonic() - time_start >= timeout:
                    raise socket_timeout()
                if polling_interval:
                    sleep(polling_interval)
            else:
                break

    def _drain_from_active_queues(self, timeout):
        # cleanup empty requests from prev run
        self._rm_empty_bulk_requests()

        # submit new requests for all active queues
        # longer timeout means less frequent polling
        # and more messages in a single bulk
        self._submit_get_bulk_requests(timeout=10)

        done, _ = wait(
            self._get_bulk_future_to_queue,
            timeout=timeout,
            return_when=FIRST_COMPLETED,
        )
        empty = {f for f in done if f.exception()}
        done -= empty
        for f in empty:
            self._get_bulk_future_to_queue.pop(f, None)

        if not done:
            raise Empty()

        logger.debug('got %d done get_bulk tasks', len(done))
        for f in done:
            queue, payloads = f.result()
            for payload in payloads:
                logger.debug('consuming message from queue: %s', queue)
                if queue not in self._callbacks:
                    logger.warning(
                        'Message for queue %s without consumers', queue
                    )
                    continue
                self._deliver(payload, queue)
            self._get_bulk_future_to_queue.pop(f, None)

    def _rm_empty_bulk_requests(self):
        empty = {
            f
            for f in self._get_bulk_future_to_queue
            if f.done() and f.exception()
        }
        for f in empty:
            self._get_bulk_future_to_queue.pop(f, None)

    def _submit_get_bulk_requests(self, timeout):
        queues_with_submitted_get_bulk = set(
            self._get_bulk_future_to_queue.values()
        )
        for channel in self.channels:
            for queue in channel._active_queues:
                if queue in queues_with_submitted_get_bulk:
                    continue
                future = self._pool.submit(channel._get_bulk, queue, timeout)
                self._get_bulk_future_to_queue[future] = queue
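# End-to-end sketch (illustrative comment, assuming an existing GCP project
# 'my-project' and application-default credentials; names are placeholders):
#
#     from kombu import Connection
#
#     with Connection('gcpubsub://projects/my-project') as conn:
#         queue = conn.SimpleQueue('hello')  # declares topic + subscription
#         queue.put({'hello': 'world'})      # routed through Channel._put()
#         msg = queue.get(timeout=10)        # pulled via _get_bulk()
#         msg.ack()                          # Channel.basic_ack -> acknowledge
#         queue.close()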