PNG  IHDRX cHRMz&u0`:pQ<bKGD pHYsodtIME MeqIDATxw]Wug^Qd˶ 6`!N:!@xI~)%7%@Bh&`lnjVF29gΨ4E$|>cɚ{gk= %,a KX%,a KX%,a KX%,a KX%,a KX%,a KX%, b` ǟzeאfp]<!SJmɤY޲ڿ,%c ~ع9VH.!Ͳz&QynֺTkRR.BLHi٪:l;@(!MԴ=žI,:o&N'Kù\vRmJ雵֫AWic H@" !: Cé||]k-Ha oݜ:y F())u]aG7*JV@J415p=sZH!=!DRʯvɱh~V\}v/GKY$n]"X"}t@ xS76^[bw4dsce)2dU0 CkMa-U5tvLƀ~mlMwfGE/-]7XAƟ`׮g ewxwC4\[~7@O-Q( a*XGƒ{ ՟}$_y3tĐƤatgvێi|K=uVyrŲlLӪuܿzwk$m87k( `múcE)"@rK( z4$D; 2kW=Xb$V[Ru819קR~qloѱDyįݎ*mxw]y5e4K@ЃI0A D@"BDk_)N\8͜9dz"fK0zɿvM /.:2O{ Nb=M=7>??Zuo32 DLD@D| &+֎C #B8ַ`bOb $D#ͮҪtx]%`ES`Ru[=¾!@Od37LJ0!OIR4m]GZRJu$‡c=%~s@6SKy?CeIh:[vR@Lh | (BhAMy=݃  G"'wzn޺~8ԽSh ~T*A:xR[ܹ?X[uKL_=fDȊ؂p0}7=D$Ekq!/t.*2ʼnDbŞ}DijYaȲ(""6HA;:LzxQ‘(SQQ}*PL*fc\s `/d'QXW, e`#kPGZuŞuO{{wm[&NBTiiI0bukcA9<4@SӊH*؎4U/'2U5.(9JuDfrޱtycU%j(:RUbArLֺN)udA':uGQN"-"Is.*+k@ `Ojs@yU/ H:l;@yyTn}_yw!VkRJ4P)~y#)r,D =ě"Q]ci'%HI4ZL0"MJy 8A{ aN<8D"1#IJi >XjX֔#@>-{vN!8tRݻ^)N_╗FJEk]CT՟ YP:_|H1@ CBk]yKYp|og?*dGvzنzӴzjֺNkC~AbZƷ`.H)=!QͷVTT(| u78y֮}|[8-Vjp%2JPk[}ԉaH8Wpqhwr:vWª<}l77_~{s۴V+RCģ%WRZ\AqHifɤL36: #F:p]Bq/z{0CU6ݳEv_^k7'>sq*+kH%a`0ԣisqにtү04gVgW΂iJiS'3w.w}l6MC2uԯ|>JF5`fV5m`Y**Db1FKNttu]4ccsQNnex/87+}xaUW9y>ͯ骵G{䩓Գ3+vU}~jJ.NFRD7<aJDB1#ҳgSb,+CS?/ VG J?|?,2#M9}B)MiE+G`-wo߫V`fio(}S^4e~V4bHOYb"b#E)dda:'?}׮4繏`{7Z"uny-?ǹ;0MKx{:_pÚmFמ:F " .LFQLG)Q8qN q¯¯3wOvxDb\. 
BKD9_NN &L:4D{mm o^tֽ:q!ƥ}K+<"m78N< ywsard5+вz~mnG)=}lYݧNj'QJS{S :UYS-952?&O-:W}(!6Mk4+>A>j+i|<<|;ر^߉=HE|V#F)Emm#}/"y GII웻Jі94+v뾧xu~5C95~ūH>c@덉pʃ1/4-A2G%7>m;–Y,cyyaln" ?ƻ!ʪ<{~h~i y.zZB̃/,雋SiC/JFMmBH&&FAbϓO^tubbb_hZ{_QZ-sύodFgO(6]TJA˯#`۶ɟ( %$&+V'~hiYy>922 Wp74Zkq+Ovn錄c>8~GqܲcWꂎz@"1A.}T)uiW4="jJ2W7mU/N0gcqܗOO}?9/wìXžΏ0 >֩(V^Rh32!Hj5`;O28؇2#ݕf3 ?sJd8NJ@7O0 b־?lldщ̡&|9C.8RTWwxWy46ah嘦mh٤&l zCy!PY?: CJyв]dm4ǜҐR޻RլhX{FƯanшQI@x' ao(kUUuxW_Ñ줮[w8 FRJ(8˼)_mQ _!RJhm=!cVmm ?sFOnll6Qk}alY}; "baӌ~M0w,Ggw2W:G/k2%R,_=u`WU R.9T"v,<\Ik޽/2110Ӿxc0gyC&Ny޽JҢrV6N ``یeA16"J³+Rj*;BϜkZPJaÍ<Jyw:NP8/D$ 011z֊Ⱳ3ι֘k1V_"h!JPIΣ'ɜ* aEAd:ݺ>y<}Lp&PlRfTb1]o .2EW\ͮ]38؋rTJsǏP@芎sF\> P^+dYJLbJ C-xϐn> ι$nj,;Ǖa FU *择|h ~izť3ᤓ`K'-f tL7JK+vf2)V'-sFuB4i+m+@My=O҈0"|Yxoj,3]:cо3 $#uŘ%Y"y죯LebqtҢVzq¼X)~>4L׶m~[1_k?kxֺQ`\ |ٛY4Ѯr!)N9{56(iNq}O()Em]=F&u?$HypWUeB\k]JɩSع9 Zqg4ZĊo oMcjZBU]B\TUd34ݝ~:7ڶSUsB0Z3srx 7`:5xcx !qZA!;%͚7&P H<WL!džOb5kF)xor^aujƍ7 Ǡ8/p^(L>ὴ-B,{ۇWzֺ^k]3\EE@7>lYBȝR.oHnXO/}sB|.i@ɥDB4tcm,@ӣgdtJ!lH$_vN166L__'Z)y&kH;:,Y7=J 9cG) V\hjiE;gya~%ks_nC~Er er)muuMg2;֫R)Md) ,¶ 2-wr#F7<-BBn~_(o=KO㭇[Xv eN_SMgSҐ BS헃D%g_N:/pe -wkG*9yYSZS.9cREL !k}<4_Xs#FmҶ:7R$i,fi!~' # !6/S6y@kZkZcX)%5V4P]VGYq%H1!;e1MV<!ϐHO021Dp= HMs~~a)ަu7G^];git!Frl]H/L$=AeUvZE4P\.,xi {-~p?2b#amXAHq)MWǾI_r`S Hz&|{ +ʖ_= (YS(_g0a03M`I&'9vl?MM+m~}*xT۲(fY*V4x@29s{DaY"toGNTO+xCAO~4Ϳ;p`Ѫ:>Ҵ7K 3}+0 387x\)a"/E>qpWB=1 ¨"MP(\xp߫́A3+J] n[ʼnӼaTbZUWb={~2ooKױӰp(CS\S筐R*JغV&&"FA}J>G֐p1ٸbk7 ŘH$JoN <8s^yk_[;gy-;߉DV{c B yce% aJhDȶ 2IdйIB/^n0tNtџdcKj4϶v~- CBcgqx9= PJ) dMsjpYB] GD4RDWX +h{y`,3ꊕ$`zj*N^TP4L:Iz9~6s) Ga:?y*J~?OrMwP\](21sZUD ?ܟQ5Q%ggW6QdO+\@ ̪X'GxN @'4=ˋ+*VwN ne_|(/BDfj5(Dq<*tNt1х!MV.C0 32b#?n0pzj#!38}޴o1KovCJ`8ŗ_"]] rDUy޲@ Ȗ-;xџ'^Y`zEd?0„ DAL18IS]VGq\4o !swV7ˣι%4FѮ~}6)OgS[~Q vcYbL!wG3 7띸*E Pql8=jT\꘿I(z<[6OrR8ºC~ډ]=rNl[g|v TMTղb-o}OrP^Q]<98S¤!k)G(Vkwyqyr޽Nv`N/e p/~NAOk \I:G6]4+K;j$R:Mi #*[AȚT,ʰ,;N{HZTGMoּy) ]%dHء9Պ䠬|<45,\=[bƟ8QXeB3- &dҩ^{>/86bXmZ]]yޚN[(WAHL$YAgDKp=5GHjU&99v簪C0vygln*P)9^͞}lMuiH!̍#DoRBn9l@ xA/_v=ȺT{7Yt2N"4!YN`ae 
>Q<XMydEB`VU}u]嫇.%e^ánE87Mu\t`cP=AD/G)sI"@MP;)]%fH9'FNsj1pVhY&9=0pfuJ&gޤx+k:!r˭wkl03׼Ku C &ѓYt{.O.zҏ z}/tf_wEp2gvX)GN#I ݭ߽v/ .& и(ZF{e"=V!{zW`, ]+LGz"(UJp|j( #V4, 8B 0 9OkRrlɱl94)'VH9=9W|>PS['G(*I1==C<5"Pg+x'K5EMd؞Af8lG ?D FtoB[je?{k3zQ vZ;%Ɠ,]E>KZ+T/ EJxOZ1i #T<@ I}q9/t'zi(EMqw`mYkU6;[t4DPeckeM;H}_g pMww}k6#H㶏+b8雡Sxp)&C $@'b,fPߑt$RbJ'vznuS ~8='72_`{q纶|Q)Xk}cPz9p7O:'|G~8wx(a 0QCko|0ASD>Ip=4Q, d|F8RcU"/KM opKle M3#i0c%<7׿p&pZq[TR"BpqauIp$ 8~Ĩ!8Սx\ւdT>>Z40ks7 z2IQ}ItԀ<-%S⍤};zIb$I 5K}Q͙D8UguWE$Jh )cu4N tZl+[]M4k8֦Zeq֮M7uIqG 1==tLtR,ƜSrHYt&QP윯Lg' I,3@P'}'R˪e/%-Auv·ñ\> vDJzlӾNv5:|K/Jb6KI9)Zh*ZAi`?S {aiVDԲuy5W7pWeQJk֤#5&V<̺@/GH?^τZL|IJNvI:'P=Ϛt"¨=cud S Q.Ki0 !cJy;LJR;G{BJy޺[^8fK6)=yʊ+(k|&xQ2`L?Ȓ2@Mf 0C`6-%pKpm')c$׻K5[J*U[/#hH!6acB JA _|uMvDyk y)6OPYjœ50VT K}cǻP[ $:]4MEA.y)|B)cf-A?(e|lɉ#P9V)[9t.EiQPDѠ3ϴ;E:+Օ t ȥ~|_N2,ZJLt4! %ա]u {+=p.GhNcŞQI?Nd'yeh n7zi1DB)1S | S#ًZs2|Ɛy$F SxeX{7Vl.Src3E℃Q>b6G ўYCmtկ~=K0f(=LrAS GN'ɹ9<\!a`)֕y[uՍ[09` 9 +57ts6}b4{oqd+J5fa/,97J#6yν99mRWxJyѡyu_TJc`~W>l^q#Ts#2"nD1%fS)FU w{ܯ R{ ˎ󅃏џDsZSQS;LV;7 Od1&1n$ N /.q3~eNɪ]E#oM~}v֯FڦwyZ=<<>Xo稯lfMFV6p02|*=tV!c~]fa5Y^Q_WN|Vs 0ҘދU97OI'N2'8N֭fgg-}V%y]U4 峧p*91#9U kCac_AFңĪy뚇Y_AiuYyTTYЗ-(!JFLt›17uTozc. S;7A&&<ԋ5y;Ro+:' *eYJkWR[@F %SHWP 72k4 qLd'J "zB6{AC0ƁA6U.'F3:Ȅ(9ΜL;D]m8ڥ9}dU "v!;*13Rg^fJyShyy5auA?ɩGHRjo^]׽S)Fm\toy 4WQS@mE#%5ʈfFYDX ~D5Ϡ9tE9So_aU4?Ѽm%&c{n>.KW1Tlb}:j uGi(JgcYj0qn+>) %\!4{LaJso d||u//P_y7iRJ߬nHOy) l+@$($VFIQ9%EeKʈU. 
ia&FY̒mZ=)+qqoQn >L!qCiDB;Y<%} OgBxB!ØuG)WG9y(Ą{_yesuZmZZey'Wg#C~1Cev@0D $a@˲(.._GimA:uyw֬%;@!JkQVM_Ow:P.s\)ot- ˹"`B,e CRtaEUP<0'}r3[>?G8xU~Nqu;Wm8\RIkբ^5@k+5(By'L&'gBJ3ݶ!/㮻w҅ yqPWUg<e"Qy*167΃sJ\oz]T*UQ<\FԎ`HaNmڜ6DysCask8wP8y9``GJ9lF\G g's Nn͵MLN֪u$| /|7=]O)6s !ĴAKh]q_ap $HH'\1jB^s\|- W1:=6lJBqjY^LsPk""`]w)󭃈,(HC ?䔨Y$Sʣ{4Z+0NvQkhol6C.婧/u]FwiVjZka&%6\F*Ny#8O,22+|Db~d ~Çwc N:FuuCe&oZ(l;@ee-+Wn`44AMK➝2BRՈt7g*1gph9N) *"TF*R(#'88pm=}X]u[i7bEc|\~EMn}P瘊J)K.0i1M6=7'_\kaZ(Th{K*GJyytw"IO-PWJk)..axӝ47"89Cc7ĐBiZx 7m!fy|ϿF9CbȩV 9V-՛^pV̌ɄS#Bv4-@]Vxt-Z, &ֺ*diؠ2^VXbs֔Ìl.jQ]Y[47gj=幽ex)A0ip׳ W2[ᎇhuE^~q흙L} #-b۸oFJ_QP3r6jr+"nfzRJTUqoaۍ /$d8Mx'ݓ= OՃ| )$2mcM*cЙj}f };n YG w0Ia!1Q.oYfr]DyISaP}"dIӗթO67jqR ҊƐƈaɤGG|h;t]䗖oSv|iZqX)oalv;۩meEJ\!8=$4QU4Xo&VEĊ YS^E#d,yX_> ۘ-e\ "Wa6uLĜZi`aD9.% w~mB(02G[6y.773a7 /=o7D)$Z 66 $bY^\CuP. (x'"J60׿Y:Oi;F{w佩b+\Yi`TDWa~|VH)8q/=9!g߆2Y)?ND)%?Ǐ`k/sn:;O299yB=a[Ng 3˲N}vLNy;*?x?~L&=xyӴ~}q{qE*IQ^^ͧvü{Huu=R|>JyUlZV, B~/YF!Y\u_ݼF{_C)LD]m {H 0ihhadd nUkf3oٺCvE\)QJi+֥@tDJkB$1!Đr0XQ|q?d2) Ӣ_}qv-< FŊ߫%roppVBwü~JidY4:}L6M7f٬F "?71<2#?Jyy4뷢<_a7_=Q E=S1И/9{+93֮E{ǂw{))?maÆm(uLE#lïZ  ~d];+]h j?!|$F}*"4(v'8s<ŏUkm7^7no1w2ؗ}TrͿEk>p'8OB7d7R(A 9.*Mi^ͳ; eeUwS+C)uO@ =Sy]` }l8^ZzRXj[^iUɺ$tj))<sbDJfg=Pk_{xaKo1:-uyG0M ԃ\0Lvuy'ȱc2Ji AdyVgVh!{]/&}}ċJ#%d !+87<;qN޼Nفl|1N:8ya  8}k¾+-$4FiZYÔXk*I&'@iI99)HSh4+2G:tGhS^繿 Kتm0 вDk}֚+QT4;sC}rՅE,8CX-e~>G&'9xpW,%Fh,Ry56Y–hW-(v_,? ; qrBk4-V7HQ;ˇ^Gv1JVV%,ik;D_W!))+BoS4QsTM;gt+ndS-~:11Sgv!0qRVh!"Ȋ(̦Yl.]PQWgٳE'`%W1{ndΗBk|Ž7ʒR~,lnoa&:ü$ 3<a[CBݮwt"o\ePJ=Hz"_c^Z.#ˆ*x z̝grY]tdkP*:97YľXyBkD4N.C_[;F9`8& !AMO c `@BA& Ost\-\NX+Xp < !bj3C&QL+*&kAQ=04}cC!9~820G'PC9xa!w&bo_1 Sw"ܱ V )Yl3+ס2KoXOx]"`^WOy :3GO0g;%Yv㐫(R/r (s } u B &FeYZh0y> =2<Ϟc/ -u= c&׭,.0"g"7 6T!vl#sc>{u/Oh Bᾈ)۴74]x7 gMӒ"d]U)}" v4co[ ɡs 5Gg=XR14?5A}D "b{0$L .\4y{_fe:kVS\\O]c^W52LSBDM! 
C3Dhr̦RtArx4&agaN3Cf<Ԉp4~ B'"1@.b_/xQ} _߃҉/gٓ2Qkqp0շpZ2fԫYz< 4L.Cyυι1t@鎫Fe sYfsF}^ V}N<_`p)alٶ "(XEAVZ<)2},:Ir*#m_YӼ R%a||EƼIJ,,+f"96r/}0jE/)s)cjW#w'Sʯ5<66lj$a~3Kʛy 2:cZ:Yh))+a߭K::N,Q F'qB]={.]h85C9cr=}*rk?vwV렵ٸW Rs%}rNAkDv|uFLBkWY YkX מ|)1!$#3%y?pF<@<Rr0}: }\J [5FRxY<9"SQdE(Q*Qʻ)q1E0B_O24[U'],lOb ]~WjHޏTQ5Syu wq)xnw8~)c 쫬gٲߠ H% k5dƝk> kEj,0% b"vi2Wس_CuK)K{n|>t{P1򨾜j>'kEkƗBg*H%'_aY6Bn!TL&ɌOb{c`'d^{t\i^[uɐ[}q0lM˕G:‚4kb祔c^:?bpg… +37stH:0}en6x˟%/<]BL&* 5&fK9Mq)/iyqtA%kUe[ڛKN]Ě^,"`/ s[EQQm?|XJ߅92m]G.E΃ח U*Cn.j_)Tѧj̿30ڇ!A0=͜ar I3$C^-9#|pk!)?7.x9 @OO;WƝZBFU keZ75F6Tc6"ZȚs2y/1 ʵ:u4xa`C>6Rb/Yм)^=+~uRd`/|_8xbB0?Ft||Z\##|K 0>>zxv8۴吅q 8ĥ)"6>~\8:qM}#͚'ĉ#p\׶ l#bA?)|g g9|8jP(cr,BwV (WliVxxᡁ@0Okn;ɥh$_ckCgriv}>=wGzβ KkBɛ[˪ !J)h&k2%07δt}!d<9;I&0wV/ v 0<H}L&8ob%Hi|޶o&h1L|u֦y~󛱢8fٲUsւ)0oiFx2}X[zVYr_;N(w]_4B@OanC?gĦx>мgx>ΛToZoOMp>40>V Oy V9iq!4 LN,ˢu{jsz]|"R޻&'ƚ{53ўFu(<٪9:΋]B;)B>1::8;~)Yt|0(pw2N%&X,URBK)3\zz&}ax4;ǟ(tLNg{N|Ǽ\G#C9g$^\}p?556]/RP.90 k,U8/u776s ʪ_01چ|\N 0VV*3H鴃J7iI!wG_^ypl}r*jɤSR 5QN@ iZ#1ٰy;_\3\BQQ x:WJv츟ٯ$"@6 S#qe딇(/P( Dy~TOϻ<4:-+F`0||;Xl-"uw$Цi󼕝mKʩorz"mϺ$F:~E'ҐvD\y?Rr8_He@ e~O,T.(ފR*cY^m|cVR[8 JҡSm!ΆԨb)RHG{?MpqrmN>߶Y)\p,d#xۆWY*,l6]v0h15M˙MS8+EdI='LBJIH7_9{Caз*Lq,dt >+~ّeʏ?xԕ4bBAŚjﵫ!'\Ը$WNvKO}ӽmSşذqsOy?\[,d@'73'j%kOe`1.g2"e =YIzS2|zŐƄa\U,dP;jhhhaxǶ?КZ՚.q SE+XrbOu%\GتX(H,N^~]JyEZQKceTQ]VGYqnah;y$cQahT&QPZ*iZ8UQQM.qo/T\7X"u?Mttl2Xq(IoW{R^ ux*SYJ! 4S.Jy~ BROS[V|žKNɛP(L6V^|cR7i7nZW1Fd@ Ara{詑|(T*dN]Ko?s=@ |_EvF]׍kR)eBJc" MUUbY6`~V޴dJKß&~'d3i WWWWWW
Current Directory: /opt/saltstack/salt/lib/python3.10/site-packages/salt/fileserver
Viewing File: /opt/saltstack/salt/lib/python3.10/site-packages/salt/fileserver/svnfs.py
"""
Subversion Fileserver Backend

After enabling this backend, branches and tags in a remote subversion
repository are exposed to salt as different environments. To enable this
backend, add ``svnfs`` to the :conf_master:`fileserver_backend` option in the
Master config file.

.. code-block:: yaml

    fileserver_backend:
      - svnfs

.. note::
    ``svn`` also works here. Prior to the 2018.3.0 release, *only* ``svn``
    would work.

This backend assumes a standard svn layout with directories for ``branches``,
``tags``, and ``trunk``, at the repository root.

:depends:   - subversion
            - pysvn

.. versionchanged:: 2014.7.0
    The paths to the trunk, branches, and tags have been made configurable, via
    the config options :conf_master:`svnfs_trunk`,
    :conf_master:`svnfs_branches`, and :conf_master:`svnfs_tags`.
    :conf_master:`svnfs_mountpoint` was also added. Finally, support for
    per-remote configuration parameters was added. See the
    :conf_master:`documentation <svnfs_remotes>` for more information.
"""

import copy
import errno
import fnmatch
import hashlib
import logging
import os
import shutil
from datetime import datetime

import salt.fileserver
import salt.payload
import salt.utils.data
import salt.utils.event
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
from salt.config import DEFAULT_HASH_TYPE
from salt.exceptions import FileserverConfigError
from salt.utils.event import tagify

# Config parameters that may be overridden on a per-remote basis
PER_REMOTE_OVERRIDES = ("mountpoint", "root", "trunk", "branches", "tags")

# pylint: disable=import-error
HAS_SVN = False
try:
    import pysvn

    HAS_SVN = True
    CLIENT = pysvn.Client()
except ImportError:
    pass
# pylint: enable=import-error

log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = "svnfs"
__virtual_aliases__ = ("svn",)


def __virtual__():
    """
    Only load if subversion is available
    """
    if __virtualname__ not in __opts__["fileserver_backend"]:
        return False
    if not HAS_SVN:
        log.error(
            "Subversion fileserver backend is enabled in configuration "
            "but could not be loaded, is pysvn installed?"
        )
        return False
    errors = []
    for param in ("svnfs_trunk", "svnfs_branches", "svnfs_tags"):
        if os.path.isabs(__opts__[param]):
            errors.append(
                "Master configuration parameter '{}' (value: {}) cannot "
                "be an absolute path".format(param, __opts__[param])
            )
    if errors:
        for error in errors:
            log.error(error)
        log.error("Subversion fileserver backend will be disabled")
        return False
    return __virtualname__


def _rev(repo):
    """
    Returns revision ID of repo, or None if it could not be determined
    """
    try:
        repo_info = dict(CLIENT.info(repo["repo"]).items())
    except (pysvn._pysvn.ClientError, TypeError, KeyError, AttributeError) as exc:
        log.error(
            "Error retrieving revision ID for svnfs remote %s (cachedir: %s): %s",
            repo["url"],
            repo["repo"],
            exc,
        )
    else:
        return repo_info["revision"].number
    return None


def _failhard():
    """
    Fatal fileserver configuration issue, raise an exception
    """
    raise FileserverConfigError("Failed to load svn fileserver backend")


def init():
    """
    Return the list of svn remotes and their configuration information
    """
    bp_ = os.path.join(__opts__["cachedir"], "svnfs")
    new_remote = False
    repos = []

    per_remote_defaults = {}
    for param in PER_REMOTE_OVERRIDES:
        per_remote_defaults[param] = str(__opts__[f"svnfs_{param}"])

    for remote in __opts__["svnfs_remotes"]:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            repo_url = next(iter(remote))
            per_remote_conf = {
                key: str(val)
                for key, val in salt.utils.data.repack_dictlist(
                    remote[repo_url]
                ).items()
            }
            if not per_remote_conf:
                log.error(
                    "Invalid per-remote configuration for remote %s. If no "
                    "per-remote parameters are being specified, there may be "
                    "a trailing colon after the URL, which should be removed. "
                    "Check the master configuration file.",
                    repo_url,
                )
                _failhard()

            per_remote_errors = False
            for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES):
                log.error(
                    "Invalid configuration parameter '%s' for remote %s. "
                    "Valid parameters are: %s. See the documentation for "
                    "further information.",
                    param,
                    repo_url,
                    ", ".join(PER_REMOTE_OVERRIDES),
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, str):
            log.error(
                "Invalid svnfs remote %s. Remotes must be strings, you may "
                "need to enclose the URL in quotes",
                repo_url,
            )
            _failhard()

        try:
            repo_conf["mountpoint"] = salt.utils.url.strip_proto(
                repo_conf["mountpoint"]
            )
        except TypeError:
            # mountpoint not specified
            pass

        hash_type = getattr(hashlib, __opts__.get("hash_type", DEFAULT_HASH_TYPE))
        # hashlib constructors require bytes on Python 3; encode the URL
        repo_hash = hash_type(salt.utils.stringutils.to_bytes(repo_url)).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only attempt a new checkout if the directory is empty.
            try:
                CLIENT.checkout(repo_url, rp_)
                repos.append(rp_)
                new_remote = True
            except pysvn._pysvn.ClientError as exc:
                log.error("Failed to initialize svnfs remote '%s': %s", repo_url, exc)
                _failhard()
        else:
            # Confirm that there is an svn checkout at the necessary path by
            # running pysvn.Client().status()
            try:
                CLIENT.status(rp_)
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    "Cache path %s (corresponding remote: %s) exists but is "
                    "not a valid subversion checkout. You will need to "
                    "manually delete this directory on the master to continue "
                    "to use this svnfs remote. Error: %s",
                    rp_,
                    repo_url,
                    exc,
                )
                _failhard()

        repo_conf.update(
            {
                "repo": rp_,
                "url": repo_url,
                "hash": repo_hash,
                "cachedir": rp_,
                "lockfile": os.path.join(rp_, "update.lk"),
            }
        )
        repos.append(repo_conf)

    if new_remote:
        remote_map = os.path.join(__opts__["cachedir"], "svnfs/remote_map.txt")
        try:
            with salt.utils.files.fopen(remote_map, "w+") as fp_:
                timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
                fp_.write(f"# svnfs_remote map as of {timestamp}\n")
                for repo_conf in repos:
                    fp_.write(
                        salt.utils.stringutils.to_str(
                            "{} = {}\n".format(repo_conf["hash"], repo_conf["url"])
                        )
                    )
        except OSError:
            pass
        else:
            log.info("Wrote new svnfs_remote map to %s", remote_map)

    return repos


def _clear_old_remotes():
    """
    Remove cache directories for remotes no longer configured
    """
    bp_ = os.path.join(__opts__["cachedir"], "svnfs")
    try:
        cachedir_ls = os.listdir(bp_)
    except OSError:
        cachedir_ls = []
    repos = init()
    # Remove actively-used remotes from list
    for repo in repos:
        try:
            cachedir_ls.remove(repo["hash"])
        except ValueError:
            pass
    to_remove = []
    for item in cachedir_ls:
        if item in ("hash", "refs"):
            continue
        path = os.path.join(bp_, item)
        if os.path.isdir(path):
            to_remove.append(path)
    failed = []
    if to_remove:
        for rdir in to_remove:
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                log.error(
                    "Unable to remove old svnfs remote cachedir %s: %s", rdir, exc
                )
                failed.append(rdir)
            else:
                log.debug("svnfs removed old cachedir %s", rdir)
    for fdir in failed:
        to_remove.remove(fdir)
    return bool(to_remove), repos


def clear_cache():
    """
    Completely clear svnfs cache
    """
    fsb_cachedir = os.path.join(__opts__["cachedir"], "svnfs")
    list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/svnfs")
    errors = []
    for rdir in (fsb_cachedir, list_cachedir):
        if os.path.exists(rdir):
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                errors.append(f"Unable to delete {rdir}: {exc}")
    return errors


def clear_lock(remote=None):
    """
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    """

    def _do_clear_lock(repo):
        def _add_error(errlist, repo, exc):
            msg = "Unable to remove update lock for {} ({}): {} ".format(
                repo["url"], repo["lockfile"], exc
            )
            log.debug(msg)
            errlist.append(msg)

        success = []
        failed = []
        if os.path.exists(repo["lockfile"]):
            try:
                os.remove(repo["lockfile"])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo["lockfile"])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = "Removed lock for {}".format(repo["url"])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            # NOTE: unlike lock(), this is a substring match rather than an
            # fnmatch pattern; preserved for backward compatibility.
            try:
                if remote not in repo["url"]:
                    continue
            except TypeError:
                # remote was non-string, try again
                if str(remote) not in repo["url"]:
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors


def lock(remote=None):
    """
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    """

    def _do_lock(repo):
        success = []
        failed = []
        if not os.path.exists(repo["lockfile"]):
            try:
                with salt.utils.files.fopen(repo["lockfile"], "w+") as fp_:
                    fp_.write("")
            except OSError as exc:
                msg = "Unable to set update lock for {} ({}): {} ".format(
                    repo["url"], repo["lockfile"], exc
                )
                log.debug(msg)
                failed.append(msg)
            else:
                msg = "Set lock for {}".format(repo["url"])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_lock(remote)

    locked = []
    errors = []
    for repo in init():
        if remote:
            try:
                if not fnmatch.fnmatch(repo["url"], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo["url"], str(remote)):
                    continue
        success, failed = _do_lock(repo)
        locked.extend(success)
        errors.extend(failed)

    return locked, errors


def update():
    """
    Execute an svn update on all of the repos
    """
    # data for the fileserver event
    data = {"changed": False, "backend": "svnfs"}
    # _clear_old_remotes runs init(), so use the value from there to avoid a
    # second init()
    data["changed"], repos = _clear_old_remotes()
    for repo in repos:
        if os.path.exists(repo["lockfile"]):
            log.warning(
                "Update lockfile is present for svnfs remote %s, skipping. "
                "If this warning persists, it is possible that the update "
                "process was interrupted. Removing %s or running "
                "'salt-run fileserver.clear_lock svnfs' will allow updates "
                "to continue for this remote.",
                repo["url"],
                repo["lockfile"],
            )
            continue
        _, errors = lock(repo)
        if errors:
            log.error(
                "Unable to set update lock for svnfs remote %s, skipping.", repo["url"]
            )
            continue
        log.debug("svnfs is fetching from %s", repo["url"])
        old_rev = _rev(repo)
        try:
            CLIENT.update(repo["repo"])
        except pysvn._pysvn.ClientError as exc:
            log.error(
                "Error updating svnfs remote %s (cachedir: %s): %s",
                repo["url"],
                repo["cachedir"],
                exc,
            )

        new_rev = _rev(repo)
        if any(x is None for x in (old_rev, new_rev)):
            # There were problems getting the revision ID
            continue
        if new_rev != old_rev:
            data["changed"] = True

        clear_lock(repo)

    env_cache = os.path.join(__opts__["cachedir"], "svnfs/envs.p")
    if data.get("changed", False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        with salt.utils.files.fopen(env_cache, "wb+") as fp_:
            fp_.write(salt.payload.dumps(new_envs))
            log.trace("Wrote env cache data to %s", env_cache)

    # if there is a change, fire an event
    if __opts__.get("fileserver_events", False):
        with salt.utils.event.get_event(
            "master",
            __opts__["sock_dir"],
            opts=__opts__,
            listen=False,
        ) as event:
            event.fire_event(data, tagify(["svnfs", "update"], prefix="fileserver"))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__["cachedir"], "svnfs/hash"), find_file
        )
    except OSError:
        # Hash file won't exist if no files have yet been served up
        pass


def _env_is_exposed(env):
    """
    Check if an environment is exposed by comparing it against a whitelist and
    blacklist.
    """
    return salt.utils.stringutils.check_whitelist_blacklist(
        env,
        whitelist=__opts__["svnfs_saltenv_whitelist"],
        blacklist=__opts__["svnfs_saltenv_blacklist"],
    )


def envs(ignore_cache=False):
    """
    Return a list of refs that can be used as environments
    """
    if not ignore_cache:
        env_cache = os.path.join(__opts__["cachedir"], "svnfs/envs.p")
        cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
        if cache_match is not None:
            return cache_match
    ret = set()
    for repo in init():
        trunk = os.path.join(repo["repo"], repo["trunk"])
        if os.path.isdir(trunk):
            # Add base as the env for trunk
            ret.add("base")
        else:
            log.error(
                "svnfs trunk path '%s' does not exist in repo %s, no base "
                "environment will be provided by this remote",
                repo["trunk"],
                repo["url"],
            )

        branches = os.path.join(repo["repo"], repo["branches"])
        if os.path.isdir(branches):
            ret.update(os.listdir(branches))
        else:
            log.error(
                "svnfs branches path '%s' does not exist in repo %s",
                repo["branches"],
                repo["url"],
            )

        tags = os.path.join(repo["repo"], repo["tags"])
        if os.path.isdir(tags):
            ret.update(os.listdir(tags))
        else:
            log.error(
                "svnfs tags path '%s' does not exist in repo %s",
                repo["tags"],
                repo["url"],
            )
    return [x for x in sorted(ret) if _env_is_exposed(x)]


def _env_root(repo, saltenv):
    """
    Return the root of the directory corresponding to the desired environment,
    or None if the environment was not found.
    """
    # If 'base' is desired, look for the trunk
    if saltenv == "base":
        trunk = os.path.join(repo["repo"], repo["trunk"])
        if os.path.isdir(trunk):
            return trunk
        else:
            return None

    # Check branches
    branches = os.path.join(repo["repo"], repo["branches"])
    if os.path.isdir(branches) and saltenv in os.listdir(branches):
        return os.path.join(branches, saltenv)

    # Check tags
    tags = os.path.join(repo["repo"], repo["tags"])
    if os.path.isdir(tags) and saltenv in os.listdir(tags):
        return os.path.join(tags, saltenv)

    return None


def find_file(path, tgt_env="base", **kwargs):  # pylint: disable=W0613
    """
    Find the first file to match the path and ref. This operates similarly to
    the roots file sever but with assumptions of the directory structure
    based on svn standard practices.
    """
    fnd = {"path": "", "rel": ""}
    if os.path.isabs(path) or tgt_env not in envs():
        return fnd

    for repo in init():
        env_root = _env_root(repo, tgt_env)
        if env_root is None:
            # Environment not found, try the next repo
            continue
        if repo["mountpoint"] and not path.startswith(repo["mountpoint"] + os.path.sep):
            continue

        repo_path = path[len(repo["mountpoint"]) :].lstrip(os.path.sep)
        if repo["root"]:
            repo_path = os.path.join(repo["root"], repo_path)

        full = os.path.join(env_root, repo_path)
        if os.path.isfile(full):
            fnd["rel"] = path
            fnd["path"] = full
            try:
                # Converting the stat result to a list, the elements of the
                # list correspond to the following stat_result params:
                # 0 => st_mode=33188
                # 1 => st_ino=10227377
                # 2 => st_dev=65026
                # 3 => st_nlink=1
                # 4 => st_uid=1000
                # 5 => st_gid=1000
                # 6 => st_size=1056233
                # 7 => st_atime=1468284229
                # 8 => st_mtime=1456338235
                # 9 => st_ctime=1456338235
                fnd["stat"] = list(os.stat(full))
            except Exception:  # pylint: disable=broad-except
                pass
            return fnd
    return fnd


def serve_file(load, fnd):
    """
    Return a chunk from a file based on the data received
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    ret = {"data": "", "dest": ""}
    if not all(x in load for x in ("path", "loc", "saltenv")):
        return ret
    if not fnd["path"]:
        return ret
    ret["dest"] = fnd["rel"]
    gzip = load.get("gzip", None)
    fpath = os.path.normpath(fnd["path"])
    with salt.utils.files.fopen(fpath, "rb") as fp_:
        fp_.seek(load["loc"])
        data = fp_.read(__opts__["file_buffer_size"])
        if data and not salt.utils.files.is_binary(fpath):
            data = data.decode(__salt_system_encoding__)
        if gzip and data:
            data = salt.utils.gzip_util.compress(data, gzip)
            ret["gzip"] = gzip
        ret["data"] = data
    return ret


def file_hash(load, fnd):
    """
    Return a file hash, the hash type is set in the master config file
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    if not all(x in load for x in ("path", "saltenv")):
        return ""
    saltenv = load["saltenv"]
    if saltenv == "base":
        saltenv = "trunk"
    ret = {}
    relpath = fnd["rel"]
    path = fnd["path"]

    # If the file doesn't exist, we can't get a hash
    if not path or not os.path.isfile(path):
        return ret

    # Set the hash_type as it is determined by config
    ret["hash_type"] = __opts__["hash_type"]

    # Check if the hash is cached
    # Cache file's contents should be "hash:mtime"
    cache_path = os.path.join(
        __opts__["cachedir"],
        "svnfs",
        "hash",
        saltenv,
        "{}.hash.{}".format(relpath, __opts__["hash_type"]),
    )
    # If we have a cache, serve that if the mtime hasn't changed
    if os.path.exists(cache_path):
        # Open in text mode: the cache is written as text ("hash:mtime"),
        # and splitting bytes on a str separator would raise TypeError
        with salt.utils.files.fopen(cache_path, "r") as fp_:
            hsum, mtime = fp_.read().split(":")
        # Compare as strings; the stored mtime is the str() of a float, so a
        # direct float-vs-str comparison would never match
        if str(os.path.getmtime(path)) == mtime:
            # mtime unchanged, serve the cached hash
            ret["hsum"] = hsum
            return ret

    # if we don't have a cache entry-- lets make one
    ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
    cache_dir = os.path.dirname(cache_path)
    # make cache directory if it doesn't exist
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # save the cache object "hash:mtime"
    with salt.utils.files.fopen(cache_path, "w") as fp_:
        fp_.write("{}:{}".format(ret["hsum"], os.path.getmtime(path)))

    return ret


def _file_lists(load, form):
    """
    Return a dict containing the file lists for files, dirs, emptydirs and
    symlinks
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    if "saltenv" not in load or load["saltenv"] not in envs():
        return []

    list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/svnfs")
    if not os.path.isdir(list_cachedir):
        try:
            os.makedirs(list_cachedir)
        except OSError:
            log.critical("Unable to make cachedir %s", list_cachedir)
            return []
    list_cache = os.path.join(list_cachedir, "{}.p".format(load["saltenv"]))
    w_lock = os.path.join(list_cachedir, ".{}.w".format(load["saltenv"]))
    cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
        __opts__, form, list_cache, w_lock
    )
    if cache_match is not None:
        return cache_match
    if refresh_cache:
        ret = {"files": set(), "dirs": set(), "empty_dirs": set()}
        for repo in init():
            env_root = _env_root(repo, load["saltenv"])
            if env_root is None:
                # Environment not found, try the next repo
                continue
            if repo["root"]:
                env_root = os.path.join(env_root, repo["root"]).rstrip(os.path.sep)
                if not os.path.isdir(env_root):
                    # svnfs root (global or per-remote) does not exist in env
                    continue

            for root, dirs, files in salt.utils.path.os_walk(env_root):
                relpath = os.path.relpath(root, env_root)
                dir_rel_fn = os.path.join(repo["mountpoint"], relpath)
                if relpath != ".":
                    ret["dirs"].add(dir_rel_fn)
                    if not dirs and not files:
                        ret["empty_dirs"].add(dir_rel_fn)
                for fname in files:
                    rel_fn = os.path.relpath(os.path.join(root, fname), env_root)
                    ret["files"].add(os.path.join(repo["mountpoint"], rel_fn))
            if repo["mountpoint"]:
                ret["dirs"].add(repo["mountpoint"])
        # Convert all compiled sets to lists
        for key in ret:
            ret[key] = sorted(ret[key])
        if save_cache:
            salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
        return ret.get(form, [])
    # Shouldn't get here, but if we do, this prevents a TypeError
    return []


def file_list(load):
    """
    Return a list of all files on the file server in a specified environment
    """
    return _file_lists(load, "files")


def file_list_emptydirs(load):
    """
    Return a list of all empty directories on the master
    """
    return _file_lists(load, "empty_dirs")


def dir_list(load):
    """
    Return a list of all directories on the master
    """
    return _file_lists(load, "dirs")