PNG  IHDRX cHRMz&u0`:pQ<bKGD pHYsodtIME MeqIDATxw]Wug^Qd˶ 6`!N:!@xI~)%7%@Bh&`lnjVF29gΨ4E$|>cɚ{gk= %,a KX%,a KX%,a KX%,a KX%,a KX%,a KX%, b` ǟzeאfp]<!SJmɤY޲ڿ,%c ~ع9VH.!Ͳz&QynֺTkRR.BLHi٪:l;@(!MԴ=žI,:o&N'Kù\vRmJ雵֫AWic H@" !: Cé||]k-Ha oݜ:y F())u]aG7*JV@J415p=sZH!=!DRʯvɱh~V\}v/GKY$n]"X"}t@ xS76^[bw4dsce)2dU0 CkMa-U5tvLƀ~mlMwfGE/-]7XAƟ`׮g ewxwC4\[~7@O-Q( a*XGƒ{ ՟}$_y3tĐƤatgvێi|K=uVyrŲlLӪuܿzwk$m87k( `múcE)"@rK( z4$D; 2kW=Xb$V[Ru819קR~qloѱDyįݎ*mxw]y5e4K@ЃI0A D@"BDk_)N\8͜9dz"fK0zɿvM /.:2O{ Nb=M=7>??Zuo32 DLD@D| &+֎C #B8ַ`bOb $D#ͮҪtx]%`ES`Ru[=¾!@Od37LJ0!OIR4m]GZRJu$‡c=%~s@6SKy?CeIh:[vR@Lh | (BhAMy=݃  G"'wzn޺~8ԽSh ~T*A:xR[ܹ?X[uKL_=fDȊ؂p0}7=D$Ekq!/t.*2ʼnDbŞ}DijYaȲ(""6HA;:LzxQ‘(SQQ}*PL*fc\s `/d'QXW, e`#kPGZuŞuO{{wm[&NBTiiI0bukcA9<4@SӊH*؎4U/'2U5.(9JuDfrޱtycU%j(:RUbArLֺN)udA':uGQN"-"Is.*+k@ `Ojs@yU/ H:l;@yyTn}_yw!VkRJ4P)~y#)r,D =ě"Q]ci'%HI4ZL0"MJy 8A{ aN<8D"1#IJi >XjX֔#@>-{vN!8tRݻ^)N_╗FJEk]CT՟ YP:_|H1@ CBk]yKYp|og?*dGvzنzӴzjֺNkC~AbZƷ`.H)=!QͷVTT(| u78y֮}|[8-Vjp%2JPk[}ԉaH8Wpqhwr:vWª<}l77_~{s۴V+RCģ%WRZ\AqHifɤL36: #F:p]Bq/z{0CU6ݳEv_^k7'>sq*+kH%a`0ԣisqにtү04gVgW΂iJiS'3w.w}l6MC2uԯ|>JF5`fV5m`Y**Db1FKNttu]4ccsQNnex/87+}xaUW9y>ͯ骵G{䩓Գ3+vU}~jJ.NFRD7<aJDB1#ҳgSb,+CS?/ VG J?|?,2#M9}B)MiE+G`-wo߫V`fio(}S^4e~V4bHOYb"b#E)dda:'?}׮4繏`{7Z"uny-?ǹ;0MKx{:_pÚmFמ:F " .LFQLG)Q8qN q¯¯3wOvxDb\. 
BKD9_NN &L:4D{mm o^tֽ:q!ƥ}K+<"m78N< ywsard5+вz~mnG)=}lYݧNj'QJS{S :UYS-952?&O-:W}(!6Mk4+>A>j+i|<<|;ر^߉=HE|V#F)Emm#}/"y GII웻Jі94+v뾧xu~5C95~ūH>c@덉pʃ1/4-A2G%7>m;–Y,cyyaln" ?ƻ!ʪ<{~h~i y.zZB̃/,雋SiC/JFMmBH&&FAbϓO^tubbb_hZ{_QZ-sύodFgO(6]TJA˯#`۶ɟ( %$&+V'~hiYy>922 Wp74Zkq+Ovn錄c>8~GqܲcWꂎz@"1A.}T)uiW4="jJ2W7mU/N0gcqܗOO}?9/wìXžΏ0 >֩(V^Rh32!Hj5`;O28؇2#ݕf3 ?sJd8NJ@7O0 b־?lldщ̡&|9C.8RTWwxWy46ah嘦mh٤&l zCy!PY?: CJyв]dm4ǜҐR޻RլhX{FƯanшQI@x' ao(kUUuxW_Ñ줮[w8 FRJ(8˼)_mQ _!RJhm=!cVmm ?sFOnll6Qk}alY}; "baӌ~M0w,Ggw2W:G/k2%R,_=u`WU R.9T"v,<\Ik޽/2110Ӿxc0gyC&Ny޽JҢrV6N ``یeA16"J³+Rj*;BϜkZPJaÍ<Jyw:NP8/D$ 011z֊Ⱳ3ι֘k1V_"h!JPIΣ'ɜ* aEAd:ݺ>y<}Lp&PlRfTb1]o .2EW\ͮ]38؋rTJsǏP@芎sF\> P^+dYJLbJ C-xϐn> ι$nj,;Ǖa FU *择|h ~izť3ᤓ`K'-f tL7JK+vf2)V'-sFuB4i+m+@My=O҈0"|Yxoj,3]:cо3 $#uŘ%Y"y죯LebqtҢVzq¼X)~>4L׶m~[1_k?kxֺQ`\ |ٛY4Ѯr!)N9{56(iNq}O()Em]=F&u?$HypWUeB\k]JɩSع9 Zqg4ZĊo oMcjZBU]B\TUd34ݝ~:7ڶSUsB0Z3srx 7`:5xcx !qZA!;%͚7&P H<WL!džOb5kF)xor^aujƍ7 Ǡ8/p^(L>ὴ-B,{ۇWzֺ^k]3\EE@7>lYBȝR.oHnXO/}sB|.i@ɥDB4tcm,@ӣgdtJ!lH$_vN166L__'Z)y&kH;:,Y7=J 9cG) V\hjiE;gya~%ks_nC~Er er)muuMg2;֫R)Md) ,¶ 2-wr#F7<-BBn~_(o=KO㭇[Xv eN_SMgSҐ BS헃D%g_N:/pe -wkG*9yYSZS.9cREL !k}<4_Xs#FmҶ:7R$i,fi!~' # !6/S6y@kZkZcX)%5V4P]VGYq%H1!;e1MV<!ϐHO021Dp= HMs~~a)ަu7G^];git!Frl]H/L$=AeUvZE4P\.,xi {-~p?2b#amXAHq)MWǾI_r`S Hz&|{ +ʖ_= (YS(_g0a03M`I&'9vl?MM+m~}*xT۲(fY*V4x@29s{DaY"toGNTO+xCAO~4Ϳ;p`Ѫ:>Ҵ7K 3}+0 387x\)a"/E>qpWB=1 ¨"MP(\xp߫́A3+J] n[ʼnӼaTbZUWb={~2ooKױӰp(CS\S筐R*JغV&&"FA}J>G֐p1ٸbk7 ŘH$JoN <8s^yk_[;gy-;߉DV{c B yce% aJhDȶ 2IdйIB/^n0tNtџdcKj4϶v~- CBcgqx9= PJ) dMsjpYB] GD4RDWX +h{y`,3ꊕ$`zj*N^TP4L:Iz9~6s) Ga:?y*J~?OrMwP\](21sZUD ?ܟQ5Q%ggW6QdO+\@ ̪X'GxN @'4=ˋ+*VwN ne_|(/BDfj5(Dq<*tNt1х!MV.C0 32b#?n0pzj#!38}޴o1KovCJ`8ŗ_"]] rDUy޲@ Ȗ-;xџ'^Y`zEd?0„ DAL18IS]VGq\4o !swV7ˣι%4FѮ~}6)OgS[~Q vcYbL!wG3 7띸*E Pql8=jT\꘿I(z<[6OrR8ºC~ډ]=rNl[g|v TMTղb-o}OrP^Q]<98S¤!k)G(Vkwyqyr޽Nv`N/e p/~NAOk \I:G6]4+K;j$R:Mi #*[AȚT,ʰ,;N{HZTGMoּy) ]%dHء9Պ䠬|<45,\=[bƟ8QXeB3- &dҩ^{>/86bXmZ]]yޚN[(WAHL$YAgDKp=5GHjU&99v簪C0vygln*P)9^͞}lMuiH!̍#DoRBn9l@ xA/_v=ȺT{7Yt2N"4!YN`ae 
>Q<XMydEB`VU}u]嫇.%e^ánE87Mu\t`cP=AD/G)sI"@MP;)]%fH9'FNsj1pVhY&9=0pfuJ&gޤx+k:!r˭wkl03׼Ku C &ѓYt{.O.zҏ z}/tf_wEp2gvX)GN#I ݭ߽v/ .& и(ZF{e"=V!{zW`, ]+LGz"(UJp|j( #V4, 8B 0 9OkRrlɱl94)'VH9=9W|>PS['G(*I1==C<5"Pg+x'K5EMd؞Af8lG ?D FtoB[je?{k3zQ vZ;%Ɠ,]E>KZ+T/ EJxOZ1i #T<@ I}q9/t'zi(EMqw`mYkU6;[t4DPeckeM;H}_g pMww}k6#H㶏+b8雡Sxp)&C $@'b,fPߑt$RbJ'vznuS ~8='72_`{q纶|Q)Xk}cPz9p7O:'|G~8wx(a 0QCko|0ASD>Ip=4Q, d|F8RcU"/KM opKle M3#i0c%<7׿p&pZq[TR"BpqauIp$ 8~Ĩ!8Սx\ւdT>>Z40ks7 z2IQ}ItԀ<-%S⍤};zIb$I 5K}Q͙D8UguWE$Jh )cu4N tZl+[]M4k8֦Zeq֮M7uIqG 1==tLtR,ƜSrHYt&QP윯Lg' I,3@P'}'R˪e/%-Auv·ñ\> vDJzlӾNv5:|K/Jb6KI9)Zh*ZAi`?S {aiVDԲuy5W7pWeQJk֤#5&V<̺@/GH?^τZL|IJNvI:'P=Ϛt"¨=cud S Q.Ki0 !cJy;LJR;G{BJy޺[^8fK6)=yʊ+(k|&xQ2`L?Ȓ2@Mf 0C`6-%pKpm')c$׻K5[J*U[/#hH!6acB JA _|uMvDyk y)6OPYjœ50VT K}cǻP[ $:]4MEA.y)|B)cf-A?(e|lɉ#P9V)[9t.EiQPDѠ3ϴ;E:+Օ t ȥ~|_N2,ZJLt4! %ա]u {+=p.GhNcŞQI?Nd'yeh n7zi1DB)1S | S#ًZs2|Ɛy$F SxeX{7Vl.Src3E℃Q>b6G ўYCmtկ~=K0f(=LrAS GN'ɹ9<\!a`)֕y[uՍ[09` 9 +57ts6}b4{oqd+J5fa/,97J#6yν99mRWxJyѡyu_TJc`~W>l^q#Ts#2"nD1%fS)FU w{ܯ R{ ˎ󅃏џDsZSQS;LV;7 Od1&1n$ N /.q3~eNɪ]E#oM~}v֯FڦwyZ=<<>Xo稯lfMFV6p02|*=tV!c~]fa5Y^Q_WN|Vs 0ҘދU97OI'N2'8N֭fgg-}V%y]U4 峧p*91#9U kCac_AFңĪy뚇Y_AiuYyTTYЗ-(!JFLt›17uTozc. S;7A&&<ԋ5y;Ro+:' *eYJkWR[@F %SHWP 72k4 qLd'J "zB6{AC0ƁA6U.'F3:Ȅ(9ΜL;D]m8ڥ9}dU "v!;*13Rg^fJyShyy5auA?ɩGHRjo^]׽S)Fm\toy 4WQS@mE#%5ʈfFYDX ~D5Ϡ9tE9So_aU4?Ѽm%&c{n>.KW1Tlb}:j uGi(JgcYj0qn+>) %\!4{LaJso d||u//P_y7iRJ߬nHOy) l+@$($VFIQ9%EeKʈU. 
ia&FY̒mZ=)+qqoQn >L!qCiDB;Y<%} OgBxB!ØuG)WG9y(Ą{_yesuZmZZey'Wg#C~1Cev@0D $a@˲(.._GimA:uyw֬%;@!JkQVM_Ow:P.s\)ot- ˹"`B,e CRtaEUP<0'}r3[>?G8xU~Nqu;Wm8\RIkբ^5@k+5(By'L&'gBJ3ݶ!/㮻w҅ yqPWUg<e"Qy*167΃sJ\oz]T*UQ<\FԎ`HaNmڜ6DysCask8wP8y9``GJ9lF\G g's Nn͵MLN֪u$| /|7=]O)6s !ĴAKh]q_ap $HH'\1jB^s\|- W1:=6lJBqjY^LsPk""`]w)󭃈,(HC ?䔨Y$Sʣ{4Z+0NvQkhol6C.婧/u]FwiVjZka&%6\F*Ny#8O,22+|Db~d ~Çwc N:FuuCe&oZ(l;@ee-+Wn`44AMK➝2BRՈt7g*1gph9N) *"TF*R(#'88pm=}X]u[i7bEc|\~EMn}P瘊J)K.0i1M6=7'_\kaZ(Th{K*GJyytw"IO-PWJk)..axӝ47"89Cc7ĐBiZx 7m!fy|ϿF9CbȩV 9V-՛^pV̌ɄS#Bv4-@]Vxt-Z, &ֺ*diؠ2^VXbs֔Ìl.jQ]Y[47gj=幽ex)A0ip׳ W2[ᎇhuE^~q흙L} #-b۸oFJ_QP3r6jr+"nfzRJTUqoaۍ /$d8Mx'ݓ= OՃ| )$2mcM*cЙj}f };n YG w0Ia!1Q.oYfr]DyISaP}"dIӗթO67jqR ҊƐƈaɤGG|h;t]䗖oSv|iZqX)oalv;۩meEJ\!8=$4QU4Xo&VEĊ YS^E#d,yX_> ۘ-e\ "Wa6uLĜZi`aD9.% w~mB(02G[6y.773a7 /=o7D)$Z 66 $bY^\CuP. (x'"J60׿Y:Oi;F{w佩b+\Yi`TDWa~|VH)8q/=9!g߆2Y)?ND)%?Ǐ`k/sn:;O299yB=a[Ng 3˲N}vLNy;*?x?~L&=xyӴ~}q{qE*IQ^^ͧvü{Huu=R|>JyUlZV, B~/YF!Y\u_ݼF{_C)LD]m {H 0ihhadd nUkf3oٺCvE\)QJi+֥@tDJkB$1!Đr0XQ|q?d2) Ӣ_}qv-< FŊ߫%roppVBwü~JidY4:}L6M7f٬F "?71<2#?Jyy4뷢<_a7_=Q E=S1И/9{+93֮E{ǂw{))?maÆm(uLE#lïZ  ~d];+]h j?!|$F}*"4(v'8s<ŏUkm7^7no1w2ؗ}TrͿEk>p'8OB7d7R(A 9.*Mi^ͳ; eeUwS+C)uO@ =Sy]` }l8^ZzRXj[^iUɺ$tj))<sbDJfg=Pk_{xaKo1:-uyG0M ԃ\0Lvuy'ȱc2Ji AdyVgVh!{]/&}}ċJ#%d !+87<;qN޼Nفl|1N:8ya  8}k¾+-$4FiZYÔXk*I&'@iI99)HSh4+2G:tGhS^繿 Kتm0 вDk}֚+QT4;sC}rՅE,8CX-e~>G&'9xpW,%Fh,Ry56Y–hW-(v_,? ; qrBk4-V7HQ;ˇ^Gv1JVV%,ik;D_W!))+BoS4QsTM;gt+ndS-~:11Sgv!0qRVh!"Ȋ(̦Yl.]PQWgٳE'`%W1{ndΗBk|Ž7ʒR~,lnoa&:ü$ 3<a[CBݮwt"o\ePJ=Hz"_c^Z.#ˆ*x z̝grY]tdkP*:97YľXyBkD4N.C_[;F9`8& !AMO c `@BA& Ost\-\NX+Xp < !bj3C&QL+*&kAQ=04}cC!9~820G'PC9xa!w&bo_1 Sw"ܱ V )Yl3+ס2KoXOx]"`^WOy :3GO0g;%Yv㐫(R/r (s } u B &FeYZh0y> =2<Ϟc/ -u= c&׭,.0"g"7 6T!vl#sc>{u/Oh Bᾈ)۴74]x7 gMӒ"d]U)}" v4co[ ɡs 5Gg=XR14?5A}D "b{0$L .\4y{_fe:kVS\\O]c^W52LSBDM! 
C3Dhr̦RtArx4&agaN3Cf<Ԉp4~ B'"1@.b_/xQ} _߃҉/gٓ2Qkqp0շpZ2fԫYz< 4L.Cyυι1t@鎫Fe sYfsF}^ V}N<_`p)alٶ "(XEAVZ<)2},:Ir*#m_YӼ R%a||EƼIJ,,+f"96r/}0jE/)s)cjW#w'Sʯ5<66lj$a~3Kʛy 2:cZ:Yh))+a߭K::N,Q F'qB]={.]h85C9cr=}*rk?vwV렵ٸW Rs%}rNAkDv|uFLBkWY YkX מ|)1!$#3%y?pF<@<Rr0}: }\J [5FRxY<9"SQdE(Q*Qʻ)q1E0B_O24[U'],lOb ]~WjHޏTQ5Syu wq)xnw8~)c 쫬gٲߠ H% k5dƝk> kEj,0% b"vi2Wس_CuK)K{n|>t{P1򨾜j>'kEkƗBg*H%'_aY6Bn!TL&ɌOb{c`'d^{t\i^[uɐ[}q0lM˕G:‚4kb祔c^:?bpg… +37stH:0}en6x˟%/<]BL&* 5&fK9Mq)/iyqtA%kUe[ڛKN]Ě^,"`/ s[EQQm?|XJ߅92m]G.E΃ח U*Cn.j_)Tѧj̿30ڇ!A0=͜ar I3$C^-9#|pk!)?7.x9 @OO;WƝZBFU keZ75F6Tc6"ZȚs2y/1 ʵ:u4xa`C>6Rb/Yм)^=+~uRd`/|_8xbB0?Ft||Z\##|K 0>>zxv8۴吅q 8ĥ)"6>~\8:qM}#͚'ĉ#p\׶ l#bA?)|g g9|8jP(cr,BwV (WliVxxᡁ@0Okn;ɥh$_ckCgriv}>=wGzβ KkBɛ[˪ !J)h&k2%07δt}!d<9;I&0wV/ v 0<H}L&8ob%Hi|޶o&h1L|u֦y~󛱢8fٲUsւ)0oiFx2}X[zVYr_;N(w]_4B@OanC?gĦx>мgx>ΛToZoOMp>40>V Oy V9iq!4 LN,ˢu{jsz]|"R޻&'ƚ{53ўFu(<٪9:΋]B;)B>1::8;~)Yt|0(pw2N%&X,URBK)3\zz&}ax4;ǟ(tLNg{N|Ǽ\G#C9g$^\}p?556]/RP.90 k,U8/u776s ʪ_01چ|\N 0VV*3H鴃J7iI!wG_^ypl}r*jɤSR 5QN@ iZ#1ٰy;_\3\BQQ x:WJv츟ٯ$"@6 S#qe딇(/P( Dy~TOϻ<4:-+F`0||;Xl-"uw$Цi󼕝mKʩorz"mϺ$F:~E'ҐvD\y?Rr8_He@ e~O,T.(ފR*cY^m|cVR[8 JҡSm!ΆԨb)RHG{?MpqrmN>߶Y)\p,d#xۆWY*,l6]v0h15M˙MS8+EdI='LBJIH7_9{Caз*Lq,dt >+~ّeʏ?xԕ4bBAŚjﵫ!'\Ը$WNvKO}ӽmSşذqsOy?\[,d@'73'j%kOe`1.g2"e =YIzS2|zŐƄa\U,dP;jhhhaxǶ?КZ՚.q SE+XrbOu%\GتX(H,N^~]JyEZQKceTQ]VGYqnah;y$cQahT&QPZ*iZ8UQQM.qo/T\7X"u?Mttl2Xq(IoW{R^ ux*SYJ! 4S.Jy~ BROS[V|žKNɛP(L6V^|cR7i7nZW1Fd@ Ara{詑|(T*dN]Ko?s=@ |_EvF]׍kR)eBJc" MUUbY6`~V޴dJKß&~'d3i WWWWWW
Current Directory: /usr/local/nagios/venv/lib/python3.13/site-packages/redis/commands/search
Viewing File: /usr/local/nagios/venv/lib/python3.13/site-packages/redis/commands/search/commands.py
import itertools
import time
from typing import Dict, List, Optional, Union

from redis.client import NEVER_DECODE, Pipeline
from redis.utils import deprecated_function

from ..helpers import get_protocol_version
from ._util import to_string
from .aggregation import AggregateRequest, AggregateResult, Cursor
from .document import Document
from .field import Field
from .index_definition import IndexDefinition
from .profile_information import ProfileInformation
from .query import Query
from .result import Result
from .suggestion import SuggestionParser

NUMERIC = "NUMERIC"

CREATE_CMD = "FT.CREATE"
ALTER_CMD = "FT.ALTER"
SEARCH_CMD = "FT.SEARCH"
ADD_CMD = "FT.ADD"
ADDHASH_CMD = "FT.ADDHASH"
DROPINDEX_CMD = "FT.DROPINDEX"
EXPLAIN_CMD = "FT.EXPLAIN"
EXPLAINCLI_CMD = "FT.EXPLAINCLI"
DEL_CMD = "FT.DEL"
AGGREGATE_CMD = "FT.AGGREGATE"
PROFILE_CMD = "FT.PROFILE"
CURSOR_CMD = "FT.CURSOR"
SPELLCHECK_CMD = "FT.SPELLCHECK"
DICT_ADD_CMD = "FT.DICTADD"
DICT_DEL_CMD = "FT.DICTDEL"
DICT_DUMP_CMD = "FT.DICTDUMP"
MGET_CMD = "FT.MGET"
CONFIG_CMD = "FT.CONFIG"
TAGVALS_CMD = "FT.TAGVALS"
ALIAS_ADD_CMD = "FT.ALIASADD"
ALIAS_UPDATE_CMD = "FT.ALIASUPDATE"
ALIAS_DEL_CMD = "FT.ALIASDEL"
INFO_CMD = "FT.INFO"
SUGADD_COMMAND = "FT.SUGADD"
SUGDEL_COMMAND = "FT.SUGDEL"
SUGLEN_COMMAND = "FT.SUGLEN"
SUGGET_COMMAND = "FT.SUGGET"
SYNUPDATE_CMD = "FT.SYNUPDATE"
SYNDUMP_CMD = "FT.SYNDUMP"

NOOFFSETS = "NOOFFSETS"
NOFIELDS = "NOFIELDS"
NOHL = "NOHL"
NOFREQS = "NOFREQS"
MAXTEXTFIELDS = "MAXTEXTFIELDS"
TEMPORARY = "TEMPORARY"
STOPWORDS = "STOPWORDS"
SKIPINITIALSCAN = "SKIPINITIALSCAN"
WITHSCORES = "WITHSCORES"
FUZZY = "FUZZY"
WITHPAYLOADS = "WITHPAYLOADS"


class SearchCommands:
    """Search commands."""

    def _parse_results(self, cmd, res, **kwargs):
        # RESP3 responses are already structured; only FT.PROFILE needs wrapping.
        # RESP2 responses are decoded through the per-command callback table.
        if get_protocol_version(self.client) in ["3", 3]:
            return ProfileInformation(res) if cmd == "FT.PROFILE" else res
        else:
            return self._RESP2_MODULE_CALLBACKS[cmd](res, **kwargs)

    def _parse_info(self, res, **kwargs):
        # FT.INFO returns a flat [key, value, key, value, ...] array.
        it = map(to_string, res)
        return dict(zip(it, it))

    def _parse_search(self, res, **kwargs):
        return Result(
            res,
            not kwargs["query"]._no_content,
            duration=kwargs["duration"],
            has_payload=kwargs["query"]._with_payloads,
            with_scores=kwargs["query"]._with_scores,
            field_encodings=kwargs["query"]._return_fields_decode_as,
        )

    def _parse_aggregate(self, res, **kwargs):
        return self._get_aggregate_result(res, kwargs["query"], kwargs["has_cursor"])

    def _parse_profile(self, res, **kwargs):
        query = kwargs["query"]
        if isinstance(query, AggregateRequest):
            result = self._get_aggregate_result(res[0], query, query._cursor)
        else:
            result = Result(
                res[0],
                not query._no_content,
                duration=kwargs["duration"],
                has_payload=query._with_payloads,
                with_scores=query._with_scores,
            )
        return result, ProfileInformation(res[1])

    def _parse_spellcheck(self, res, **kwargs):
        corrections = {}
        if res == 0:
            return corrections

        for _correction in res:
            if isinstance(_correction, int) and _correction == 0:
                continue

            if len(_correction) != 3:
                continue
            if not _correction[2]:
                continue
            if not _correction[2][0]:
                continue

            # For spellcheck output
            # 1) 1) "TERM"
            #    2) "{term1}"
            #    3) 1) 1)  "{score1}"
            #          2)  "{suggestion1}"
            #       2) 1)  "{score2}"
            #          2)  "{suggestion2}"
            #
            # Following dictionary will be made
            # corrections = {
            #     '{term1}': [
            #         {'score': '{score1}', 'suggestion': '{suggestion1}'},
            #         {'score': '{score2}', 'suggestion': '{suggestion2}'}
            #     ]
            # }
            corrections[_correction[1]] = [
                {"score": _item[0], "suggestion": _item[1]}
                for _item in _correction[2]
            ]

        return corrections

    def _parse_config_get(self, res, **kwargs):
        return {kvs[0]: kvs[1] for kvs in res} if res else {}

    def _parse_syndump(self, res, **kwargs):
        return {res[i]: res[i + 1] for i in range(0, len(res), 2)}

    def batch_indexer(self, chunk_size=100):
        """
        Create a new batch indexer from the client with a given chunk size
        """
        return self.BatchIndexer(self, chunk_size=chunk_size)

    def create_index(
        self,
        fields: List[Field],
        no_term_offsets: bool = False,
        no_field_flags: bool = False,
        stopwords: Optional[List[str]] = None,
        definition: Optional[IndexDefinition] = None,
        max_text_fields=False,
        temporary=None,
        no_highlight: bool = False,
        no_term_frequencies: bool = False,
        skip_initial_scan: bool = False,
    ):
        """
        Creates the search index. The index must not already exist.

        For more information, see https://redis.io/commands/ft.create/

        Args:
            fields: A list of Field objects.
            no_term_offsets: If `true`, term offsets will not be saved in the
                index.
            no_field_flags: If true, field flags that allow searching in
                specific fields will not be saved.
            stopwords: If provided, the index will be created with this custom
                stopword list. The list can be empty.
            definition: If provided, the index will be created with this custom
                index definition.
            max_text_fields: If true, indexes will be encoded as if there were
                more than 32 text fields, allowing for additional fields beyond
                32.
            temporary: Creates a lightweight temporary index which will expire
                after the specified period of inactivity. The internal idle
                timer is reset whenever the index is searched or added to.
            no_highlight: If true, disables highlighting support. Also implied
                by `no_term_offsets`.
            no_term_frequencies: If true, term frequencies will not be saved in
                the index.
            skip_initial_scan: If true, the initial scan and indexing will be
                skipped.
        """
        args = [CREATE_CMD, self.index_name]
        if definition is not None:
            args += definition.args
        if max_text_fields:
            args.append(MAXTEXTFIELDS)
        if temporary is not None and isinstance(temporary, int):
            args.append(TEMPORARY)
            args.append(temporary)
        if no_term_offsets:
            args.append(NOOFFSETS)
        if no_highlight:
            args.append(NOHL)
        if no_field_flags:
            args.append(NOFIELDS)
        if no_term_frequencies:
            args.append(NOFREQS)
        if skip_initial_scan:
            args.append(SKIPINITIALSCAN)
        if stopwords is not None and isinstance(stopwords, (list, tuple, set)):
            args += [STOPWORDS, len(stopwords)]
            if len(stopwords) > 0:
                args += list(stopwords)

        args.append("SCHEMA")
        try:
            args += list(itertools.chain(*(f.redis_args() for f in fields)))
        except TypeError:
            # `fields` was a single Field, not an iterable of them
            args += fields.redis_args()

        return self.execute_command(*args)

    def alter_schema_add(self, fields: List[str]):
        """
        Alter the existing search index by adding new fields. The index
        must already exist.

        ### Parameters:

        - **fields**: a list of Field objects to add for the index

        For more information see `FT.ALTER <https://redis.io/commands/ft.alter>`_.
        """  # noqa
        args = [ALTER_CMD, self.index_name, "SCHEMA", "ADD"]
        try:
            args += list(itertools.chain(*(f.redis_args() for f in fields)))
        except TypeError:
            # `fields` was a single Field, not an iterable of them
            args += fields.redis_args()

        return self.execute_command(*args)

    def dropindex(self, delete_documents: bool = False):
        """
        Drop the index if it exists.
        Replaced `drop_index` in RediSearch 2.0.
        Default behavior was changed to not delete the indexed documents.

        ### Parameters:

        - **delete_documents**: If `True`, all documents will be deleted.

        For more information see `FT.DROPINDEX <https://redis.io/commands/ft.dropindex>`_.
        """  # noqa
        args = [DROPINDEX_CMD, self.index_name]
        delete_str = (
            "DD"
            if isinstance(delete_documents, bool) and delete_documents is True
            else ""
        )
        if delete_str:
            args.append(delete_str)
        return self.execute_command(*args)

    def _add_document(
        self,
        doc_id,
        conn=None,
        nosave=False,
        score=1.0,
        payload=None,
        replace=False,
        partial=False,
        language=None,
        no_create=False,
        **fields,
    ):
        """
        Internal add_document used for both batch and single doc indexing
        """
        # PARTIAL and NOCREATE are only meaningful with REPLACE
        if partial or no_create:
            replace = True

        args = [ADD_CMD, self.index_name, doc_id, score]
        if nosave:
            args.append("NOSAVE")
        if payload is not None:
            args.append("PAYLOAD")
            args.append(payload)
        if replace:
            args.append("REPLACE")
            if partial:
                args.append("PARTIAL")
            if no_create:
                args.append("NOCREATE")
        if language:
            args += ["LANGUAGE", language]

        args.append("FIELDS")
        args += list(itertools.chain(*fields.items()))

        if conn is not None:
            return conn.execute_command(*args)

        return self.execute_command(*args)

    def _add_document_hash(
        self, doc_id, conn=None, score=1.0, language=None, replace=False
    ):
        """
        Internal add_document_hash used for both batch and single doc indexing
        """
        args = [ADDHASH_CMD, self.index_name, doc_id, score]

        if replace:
            args.append("REPLACE")

        if language:
            args += ["LANGUAGE", language]

        if conn is not None:
            return conn.execute_command(*args)

        return self.execute_command(*args)

    @deprecated_function(
        version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
    )
    def add_document(
        self,
        doc_id: str,
        nosave: bool = False,
        score: float = 1.0,
        payload: Optional[str] = None,
        replace: bool = False,
        partial: bool = False,
        language: Optional[str] = None,
        no_create: bool = False,
        **fields: List[str],
    ):
        """
        Add a single document to the index.

        Args:
            doc_id: the id of the saved document.
            nosave: if set to true, we just index the document, and don't
                save a copy of it. This means that searches will just
                return ids.
            score: the document ranking, between 0.0 and 1.0
            payload: optional inner-index payload we can save for fast access
                in scoring functions
            replace: if True, and the document already is in the index, we
                perform an update and reindex the document
            partial: if True, the fields specified will be added to the
                existing document.
                This has the added benefit that any fields specified
                with `no_index` will not be reindexed again. Implies `replace`
            language: Specify the language used for document tokenization.
            no_create: if True, the document is only updated and reindexed
                if it already exists. If the document does not exist, an
                error will be returned. Implies `replace`
            fields: kwargs dictionary of the document fields to be saved
                and/or indexed.
                NOTE: Geo points shoule be encoded as strings of "lon,lat"
        """  # noqa
        return self._add_document(
            doc_id,
            conn=None,
            nosave=nosave,
            score=score,
            payload=payload,
            replace=replace,
            partial=partial,
            language=language,
            no_create=no_create,
            **fields,
        )

    @deprecated_function(
        version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
    )
    def add_document_hash(self, doc_id, score=1.0, language=None, replace=False):
        """
        Add a hash document to the index.

        ### Parameters

        - **doc_id**: the document's id. This has to be an existing HASH key
           in Redis that will hold the fields the index needs.
        - **score**:  the document ranking, between 0.0 and 1.0
        - **replace**: if True, and the document already is in the index, we
          perform an update and reindex the document
        - **language**: Specify the language used for document tokenization.
        """  # noqa
        return self._add_document_hash(
            doc_id, conn=None, score=score, language=language, replace=replace
        )

    @deprecated_function(version="2.0.0", reason="deprecated since redisearch 2.0")
    def delete_document(self, doc_id, conn=None, delete_actual_document=False):
        """
        Delete a document from index
        Returns 1 if the document was deleted, 0 if not

        ### Parameters

        - **delete_actual_document**: if set to True, RediSearch also delete
          the actual document if it is in the index
        """  # noqa
        args = [DEL_CMD, self.index_name, doc_id]
        if delete_actual_document:
            args.append("DD")

        if conn is not None:
            return conn.execute_command(*args)

        return self.execute_command(*args)

    def load_document(self, id):
        """
        Load a single document by id
        """
        fields = self.client.hgetall(id)
        f2 = {to_string(k): to_string(v) for k, v in fields.items()}
        fields = f2

        try:
            del fields["id"]
        except KeyError:
            pass

        return Document(id=id, **fields)

    @deprecated_function(version="2.0.0", reason="deprecated since redisearch 2.0")
    def get(self, *ids):
        """
        Returns the full contents of multiple documents.

        ### Parameters

        - **ids**: the ids of the saved documents.
        """
        return self.execute_command(MGET_CMD, self.index_name, *ids)

    def info(self):
        """
        Get info an stats about the the current index, including the number
        of documents, memory consumption, etc

        For more information see `FT.INFO <https://redis.io/commands/ft.info>`_.
        """
        res = self.execute_command(INFO_CMD, self.index_name)
        return self._parse_results(INFO_CMD, res)

    def get_params_args(
        self, query_params: Union[Dict[str, Union[str, int, float, bytes]], None]
    ):
        if query_params is None:
            return []
        args = []
        if len(query_params) > 0:
            args.append("params")
            args.append(len(query_params) * 2)
            for key, value in query_params.items():
                args.append(key)
                args.append(value)
        return args

    def _mk_query_args(
        self, query, query_params: Union[Dict[str, Union[str, int, float, bytes]], None]
    ):
        args = [self.index_name]

        if isinstance(query, str):
            # convert the query from a text to a query object
            query = Query(query)
        if not isinstance(query, Query):
            raise ValueError(f"Bad query type {type(query)}")

        args += query.get_args()
        args += self.get_params_args(query_params)

        return args, query

    def search(
        self,
        query: Union[str, Query],
        query_params: Union[Dict[str, Union[str, int, float, bytes]], None] = None,
    ):
        """
        Search the index for a given query, and return a result of documents

        ### Parameters

        - **query**: the search query. Either a text for simple queries with
          default parameters, or a Query object for complex queries.
          See RediSearch's documentation on query format

        For more information see `FT.SEARCH <https://redis.io/commands/ft.search>`_.
        """  # noqa
        args, query = self._mk_query_args(query, query_params=query_params)
        st = time.monotonic()

        options = {}
        if get_protocol_version(self.client) not in ["3", 3]:
            options[NEVER_DECODE] = True

        res = self.execute_command(SEARCH_CMD, *args, **options)

        if isinstance(res, Pipeline):
            return res

        return self._parse_results(
            SEARCH_CMD, res, query=query, duration=(time.monotonic() - st) * 1000.0
        )

    def explain(
        self,
        query: Union[str, Query],
        query_params: Dict[str, Union[str, int, float]] = None,
    ):
        """Returns the execution plan for a complex query.

        For more information see `FT.EXPLAIN <https://redis.io/commands/ft.explain>`_.
        """  # noqa
        args, query_text = self._mk_query_args(query, query_params=query_params)
        return self.execute_command(EXPLAIN_CMD, *args)

    def explain_cli(self, query: Union[str, Query]):  # noqa
        raise NotImplementedError("EXPLAINCLI will not be implemented.")

    def aggregate(
        self,
        query: Union[AggregateRequest, Cursor],
        query_params: Dict[str, Union[str, int, float]] = None,
    ):
        """
        Issue an aggregation query.

        ### Parameters

        **query**: This can be either an `AggregateRequest`, or a `Cursor`

        An `AggregateResult` object is returned. You can access the rows from
        its `rows` property, which will always yield the rows of the result.

        For more information see `FT.AGGREGATE <https://redis.io/commands/ft.aggregate>`_.
        """  # noqa
        if isinstance(query, AggregateRequest):
            has_cursor = bool(query._cursor)
            cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
        elif isinstance(query, Cursor):
            has_cursor = True
            cmd = [CURSOR_CMD, "READ", self.index_name] + query.build_args()
        else:
            raise ValueError("Bad query", query)
        cmd += self.get_params_args(query_params)

        raw = self.execute_command(*cmd)
        return self._parse_results(
            AGGREGATE_CMD, raw, query=query, has_cursor=has_cursor
        )

    def _get_aggregate_result(
        self, raw: List, query: Union[AggregateRequest, Cursor], has_cursor: bool
    ):
        if has_cursor:
            if isinstance(query, Cursor):
                query.cid = raw[1]
                cursor = query
            else:
                cursor = Cursor(raw[1])
            raw = raw[0]
        else:
            cursor = None

        if isinstance(query, AggregateRequest) and query._with_schema:
            schema = raw[0]
            rows = raw[2:]
        else:
            schema = None
            rows = raw[1:]

        return AggregateResult(rows, cursor, schema)

    def profile(
        self,
        query: Union[Query, AggregateRequest],
        limited: bool = False,
        query_params: Optional[Dict[str, Union[str, int, float]]] = None,
    ):
        """
        Performs a search or aggregate command and collects performance
        information.

        ### Parameters

        **query**: This can be either an `AggregateRequest` or `Query`.
        **limited**: If set to True, removes details of reader iterator.
        **query_params**: Define one or more value parameters.
        Each parameter has a name and a value.
        """
        st = time.monotonic()
        # cmd[2] is a placeholder for the "SEARCH"/"AGGREGATE" subcommand
        cmd = [PROFILE_CMD, self.index_name, ""]
        if limited:
            cmd.append("LIMITED")
        cmd.append("QUERY")

        if isinstance(query, AggregateRequest):
            cmd[2] = "AGGREGATE"
            cmd += query.build_args()
        elif isinstance(query, Query):
            cmd[2] = "SEARCH"
            cmd += query.get_args()
            cmd += self.get_params_args(query_params)
        else:
            raise ValueError("Must provide AggregateRequest object or Query object.")

        res = self.execute_command(*cmd)

        return self._parse_results(
            PROFILE_CMD, res, query=query, duration=(time.monotonic() - st) * 1000.0
        )

    def spellcheck(self, query, distance=None, include=None, exclude=None):
        """
        Issue a spellcheck query

        Args:
            query: search query.
            distance: the maximal Levenshtein distance for spelling
                suggestions (default: 1, max: 4).
            include: specifies an inclusion custom dictionary.
            exclude: specifies an exclusion custom dictionary.

        For more information see `FT.SPELLCHECK <https://redis.io/commands/ft.spellcheck>`_.
        """  # noqa
        cmd = [SPELLCHECK_CMD, self.index_name, query]
        if distance:
            cmd.extend(["DISTANCE", distance])

        if include:
            cmd.extend(["TERMS", "INCLUDE", include])

        if exclude:
            cmd.extend(["TERMS", "EXCLUDE", exclude])

        res = self.execute_command(*cmd)

        return self._parse_results(SPELLCHECK_CMD, res)

    def dict_add(self, name: str, *terms: List[str]):
        """Adds terms to a dictionary.

        ### Parameters

        - **name**: Dictionary name.
        - **terms**: List of items for adding to the dictionary.

        For more information see `FT.DICTADD <https://redis.io/commands/ft.dictadd>`_.
        """  # noqa
        cmd = [DICT_ADD_CMD, name]
        cmd.extend(terms)
        return self.execute_command(*cmd)

    def dict_del(self, name: str, *terms: List[str]):
        """Deletes terms from a dictionary.

        ### Parameters

        - **name**: Dictionary name.
        - **terms**: List of items for removing from the dictionary.

        For more information see `FT.DICTDEL <https://redis.io/commands/ft.dictdel>`_.
        """  # noqa
        cmd = [DICT_DEL_CMD, name]
        cmd.extend(terms)
        return self.execute_command(*cmd)

    def dict_dump(self, name: str):
        """Dumps all terms in the given dictionary.

        ### Parameters

        - **name**: Dictionary name.

        For more information see `FT.DICTDUMP <https://redis.io/commands/ft.dictdump>`_.
        """  # noqa
        cmd = [DICT_DUMP_CMD, name]
        return self.execute_command(*cmd)

    @deprecated_function(
        version="8.0.0",
        reason="deprecated since Redis 8.0, call config_set from core module instead",
    )
    def config_set(self, option: str, value: str) -> bool:
        """Set runtime configuration option.

        ### Parameters

        - **option**: the name of the configuration option.
        - **value**: a value for the configuration option.

        For more information see `FT.CONFIG SET <https://redis.io/commands/ft.config-set>`_.
        """  # noqa
        cmd = [CONFIG_CMD, "SET", option, value]
        raw = self.execute_command(*cmd)
        return raw == "OK"

    @deprecated_function(
        version="8.0.0",
        reason="deprecated since Redis 8.0, call config_get from core module instead",
    )
    def config_get(self, option: str) -> str:
        """Get runtime configuration option value.

        ### Parameters

        - **option**: the name of the configuration option.

        For more information see `FT.CONFIG GET <https://redis.io/commands/ft.config-get>`_.
        """  # noqa
        cmd = [CONFIG_CMD, "GET", option]
        res = self.execute_command(*cmd)
        return self._parse_results(CONFIG_CMD, res)

    def tagvals(self, tagfield: str):
        """
        Return a list of all possible tag values

        ### Parameters

        - **tagfield**: Tag field name

        For more information see `FT.TAGVALS <https://redis.io/commands/ft.tagvals>`_.
        """  # noqa
        return self.execute_command(TAGVALS_CMD, self.index_name, tagfield)

    def aliasadd(self, alias: str):
        """
        Alias a search index - will fail if alias already exists

        ### Parameters

        - **alias**: Name of the alias to create

        For more information see `FT.ALIASADD <https://redis.io/commands/ft.aliasadd>`_.
        """  # noqa
        return self.execute_command(ALIAS_ADD_CMD, alias, self.index_name)

    def aliasupdate(self, alias: str):
        """
        Updates an alias - will fail if alias does not already exist

        ### Parameters

        - **alias**: Name of the alias to create

        For more information see `FT.ALIASUPDATE <https://redis.io/commands/ft.aliasupdate>`_.
        """  # noqa
        return self.execute_command(ALIAS_UPDATE_CMD, alias, self.index_name)

    def aliasdel(self, alias: str):
        """
        Removes an alias to a search index

        ### Parameters

        - **alias**: Name of the alias to delete

        For more information see `FT.ALIASDEL <https://redis.io/commands/ft.aliasdel>`_.
        """  # noqa
        return self.execute_command(ALIAS_DEL_CMD, alias)

    def sugadd(self, key, *suggestions, **kwargs):
        """
        Add suggestion terms to the AutoCompleter engine. Each suggestion has
        a score and string. If kwargs["increment"] is true and the terms are
        already in the server's dictionary, we increment their scores.

        For more information see `FT.SUGADD <https://redis.io/commands/ft.sugadd/>`_.
        """  # noqa
        # If Transaction is not False it will MULTI/EXEC which will error
        pipe = self.pipeline(transaction=False)
        for sug in suggestions:
            args = [SUGADD_COMMAND, key, sug.string, sug.score]
            if kwargs.get("increment"):
                args.append("INCR")
            if sug.payload:
                args.append("PAYLOAD")
                args.append(sug.payload)

            pipe.execute_command(*args)

        return pipe.execute()[-1]

    def suglen(self, key: str) -> int:
        """
        Return the number of entries in the AutoCompleter index.

        For more information see `FT.SUGLEN <https://redis.io/commands/ft.suglen>`_.
        """  # noqa
        return self.execute_command(SUGLEN_COMMAND, key)

    def sugdel(self, key: str, string: str) -> int:
        """
        Delete a string from the AutoCompleter index.
        Returns 1 if the string was found and deleted, 0 otherwise.

        For more information see `FT.SUGDEL <https://redis.io/commands/ft.sugdel>`_.
        """  # noqa
        return self.execute_command(SUGDEL_COMMAND, key, string)

    def sugget(
        self,
        key: str,
        prefix: str,
        fuzzy: bool = False,
        num: int = 10,
        with_scores: bool = False,
        with_payloads: bool = False,
    ) -> List[SuggestionParser]:
        """
        Get a list of suggestions from the AutoCompleter, for a given prefix.

        Parameters:

        prefix : str
            The prefix we are searching. **Must be valid ascii or utf-8**
        fuzzy : bool
            If set to true, the prefix search is done in fuzzy mode.
            **NOTE**: Running fuzzy searches on short (<3 letters) prefixes
            can be very slow, and even scan the entire index.
        with_scores : bool
            If set to true, we also return the (refactored) score of
            each suggestion.
            This is normally not needed, and is NOT the original score
            inserted into the index.
        with_payloads : bool
            Return suggestion payloads
        num : int
            The maximum number of results we return. Note that we might
            return less. The algorithm trims irrelevant suggestions.

        Returns:

        list:
             A list of Suggestion objects. If with_scores was False, the
             score of all suggestions is 1.

        For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_.
        """  # noqa
        args = [SUGGET_COMMAND, key, prefix, "MAX", num]
        if fuzzy:
            args.append(FUZZY)
        if with_scores:
            args.append(WITHSCORES)
        if with_payloads:
            args.append(WITHPAYLOADS)
        res = self.execute_command(*args)
        results = []
        if not res:
            return results
        parser = SuggestionParser(with_scores, with_payloads, res)
        return [s for s in parser]

    def synupdate(self, groupid: str, skipinitial: bool = False, *terms: List[str]):
        """
        Updates a synonym group.
        The command is used to create or update a synonym group with
        additional terms.
        Only documents which were indexed after the update will be affected.

        Parameters:

        groupid :
            Synonym group id.
        skipinitial : bool
            If set to true, we do not scan and index.
        terms :
            The terms.

        For more information see `FT.SYNUPDATE <https://redis.io/commands/ft.synupdate>`_.
        """  # noqa
        cmd = [SYNUPDATE_CMD, self.index_name, groupid]
        if skipinitial:
            cmd.extend(["SKIPINITIALSCAN"])
        cmd.extend(terms)
        return self.execute_command(*cmd)

    def syndump(self):
        """
        Dumps the contents of a synonym group.

        The command is used to dump the synonyms data structure.
        Returns a list of synonym terms and their synonym group ids.

        For more information see `FT.SYNDUMP <https://redis.io/commands/ft.syndump>`_.
        """  # noqa
        res = self.execute_command(SYNDUMP_CMD, self.index_name)
        return self._parse_results(SYNDUMP_CMD, res)


class AsyncSearchCommands(SearchCommands):
    async def info(self):
        """
        Get info an stats about the the current index, including the number
        of documents, memory consumption, etc

        For more information see `FT.INFO <https://redis.io/commands/ft.info>`_.
        """
        res = await self.execute_command(INFO_CMD, self.index_name)
        return self._parse_results(INFO_CMD, res)

    async def search(
        self,
        query: Union[str, Query],
        query_params: Dict[str, Union[str, int, float]] = None,
    ):
        """
        Search the index for a given query, and return a result of documents

        ### Parameters

        - **query**: the search query. Either a text for simple queries with
          default parameters, or a Query object for complex queries.
          See RediSearch's documentation on query format

        For more information see `FT.SEARCH <https://redis.io/commands/ft.search>`_.
        """  # noqa
        args, query = self._mk_query_args(query, query_params=query_params)
        st = time.monotonic()

        options = {}
        if get_protocol_version(self.client) not in ["3", 3]:
            options[NEVER_DECODE] = True

        res = await self.execute_command(SEARCH_CMD, *args, **options)

        if isinstance(res, Pipeline):
            return res

        return self._parse_results(
            SEARCH_CMD, res, query=query, duration=(time.monotonic() - st) * 1000.0
        )

    async def aggregate(
        self,
        query: Union[AggregateRequest, Cursor],
        query_params: Dict[str, Union[str, int, float]] = None,
    ):
        """
        Issue an aggregation query.

        ### Parameters

        **query**: This can be either an `AggregateRequest`, or a `Cursor`

        An `AggregateResult` object is returned. You can access the rows from
        its `rows` property, which will always yield the rows of the result.

        For more information see `FT.AGGREGATE <https://redis.io/commands/ft.aggregate>`_.
        """  # noqa
        if isinstance(query, AggregateRequest):
            has_cursor = bool(query._cursor)
            cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
        elif isinstance(query, Cursor):
            has_cursor = True
            cmd = [CURSOR_CMD, "READ", self.index_name] + query.build_args()
        else:
            raise ValueError("Bad query", query)
        cmd += self.get_params_args(query_params)

        raw = await self.execute_command(*cmd)
        return self._parse_results(
            AGGREGATE_CMD, raw, query=query, has_cursor=has_cursor
        )

    async def spellcheck(self, query, distance=None, include=None, exclude=None):
        """
        Issue a spellcheck query

        ### Parameters

        **query**: search query.
        **distance***: the maximal Levenshtein distance for spelling
        suggestions (default: 1, max: 4).
        **include**: specifies an inclusion custom dictionary.
        **exclude**: specifies an exclusion custom dictionary.

        For more information see `FT.SPELLCHECK <https://redis.io/commands/ft.spellcheck>`_.
        """  # noqa
        cmd = [SPELLCHECK_CMD, self.index_name, query]
        if distance:
            cmd.extend(["DISTANCE", distance])

        if include:
            cmd.extend(["TERMS", "INCLUDE", include])

        if exclude:
            cmd.extend(["TERMS", "EXCLUDE", exclude])

        res = await self.execute_command(*cmd)

        return self._parse_results(SPELLCHECK_CMD, res)

    @deprecated_function(
        version="8.0.0",
        reason="deprecated since Redis 8.0, call config_set from core module instead",
    )
    async def config_set(self, option: str, value: str) -> bool:
        """Set runtime configuration option.

        ### Parameters

        - **option**: the name of the configuration option.
        - **value**: a value for the configuration option.

        For more information see `FT.CONFIG SET <https://redis.io/commands/ft.config-set>`_.
        """  # noqa
        cmd = [CONFIG_CMD, "SET", option, value]
        raw = await self.execute_command(*cmd)
        return raw == "OK"

    @deprecated_function(
        version="8.0.0",
        reason="deprecated since Redis 8.0, call config_get from core module instead",
    )
    async def config_get(self, option: str) -> str:
        """Get runtime configuration option value.

        ### Parameters

        - **option**: the name of the configuration option.

        For more information see `FT.CONFIG GET <https://redis.io/commands/ft.config-get>`_.
        """  # noqa
        cmd = [CONFIG_CMD, "GET", option]
        res = await self.execute_command(*cmd)
        return self._parse_results(CONFIG_CMD, res)

    async def load_document(self, id):
        """
        Load a single document by id
        """
        fields = await self.client.hgetall(id)
        f2 = {to_string(k): to_string(v) for k, v in fields.items()}
        fields = f2

        try:
            del fields["id"]
        except KeyError:
            pass

        return Document(id=id, **fields)

    async def sugadd(self, key, *suggestions, **kwargs):
        """
        Add suggestion terms to the AutoCompleter engine. Each suggestion has
        a score and string. If kwargs["increment"] is true and the terms are
        already in the server's dictionary, we increment their scores.

        For more information see `FT.SUGADD <https://redis.io/commands/ft.sugadd>`_.
        """  # noqa
        # If Transaction is not False it will MULTI/EXEC which will error
        pipe = self.pipeline(transaction=False)
        for sug in suggestions:
            args = [SUGADD_COMMAND, key, sug.string, sug.score]
            if kwargs.get("increment"):
                args.append("INCR")
            if sug.payload:
                args.append("PAYLOAD")
                args.append(sug.payload)

            pipe.execute_command(*args)

        return (await pipe.execute())[-1]

    async def sugget(
        self,
        key: str,
        prefix: str,
        fuzzy: bool = False,
        num: int = 10,
        with_scores: bool = False,
        with_payloads: bool = False,
    ) -> List[SuggestionParser]:
        """
        Get a list of suggestions from the AutoCompleter, for a given prefix.

        Parameters:

        prefix : str
            The prefix we are searching. **Must be valid ascii or utf-8**
        fuzzy : bool
            If set to true, the prefix search is done in fuzzy mode.
            **NOTE**: Running fuzzy searches on short (<3 letters) prefixes
            can be very slow, and even scan the entire index.
        with_scores : bool
            If set to true, we also return the (refactored) score of
            each suggestion.
        with_payloads : bool
            Return suggestion payloads
        num : int
            The maximum number of results we return. Note that we might
            return less. The algorithm trims irrelevant suggestions.

        For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_.
        """  # noqa
        # NOTE(review): the source chunk was truncated inside this method's
        # docstring; the remainder is reconstructed to mirror the synchronous
        # SearchCommands.sugget implementation with awaited I/O — confirm
        # against the full upstream file.
        args = [SUGGET_COMMAND, key, prefix, "MAX", num]
        if fuzzy:
            args.append(FUZZY)
        if with_scores:
            args.append(WITHSCORES)
        if with_payloads:
            args.append(WITHPAYLOADS)
        res = await self.execute_command(*args)
        results = []
        if not res:
            return results
        parser = SuggestionParser(with_scores, with_payloads, res)
        return [s for s in parser]
**NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index. with_scores : bool If set to true, we also return the (refactored) score of each suggestion. This is normally not needed, and is NOT the original score inserted into the index. with_payloads : bool Return suggestion payloads num : int The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions. Returns: list: A list of Suggestion objects. If with_scores was False, the score of all suggestions is 1. For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_. """ # noqa args = [SUGGET_COMMAND, key, prefix, "MAX", num] if fuzzy: args.append(FUZZY) if with_scores: args.append(WITHSCORES) if with_payloads: args.append(WITHPAYLOADS) ret = await self.execute_command(*args) results = [] if not ret: return results parser = SuggestionParser(with_scores, with_payloads, ret) return [s for s in parser]