sync.c 124 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
7427842794280428142824283428442854286428742884289429042914292429342944295429642974298429943004301430243034304430543064307430843094310431143124313431443154316431743184319432043214322432343244325432643274328432943304331433243334334433543364337433843394340434143424343434443454346434743484349435043514352435343544355435643574358435943604361436243634364436543664367436843694370437143724373437443754376437743784379438043814382438343844385438643874388438943904391439243934394439543964397439843994400440144024403
  1. /*
  2. clsync - file tree sync utility based on inotify/kqueue
  3. Copyright (C) 2013-2014 Dmitry Yu Okunev <dyokunev@ut.mephi.ru> 0x8E30679C
  4. This program is free software: you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation, either version 3 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program. If not, see <http://www.gnu.org/licenses/>.
  14. */
  15. #include "common.h"
  16. #if KQUEUE_SUPPORT
  17. # include "mon_kqueue.h"
  18. #endif
  19. #if INOTIFY_SUPPORT
  20. # include "mon_inotify.h"
  21. #endif
  22. #if FANOTIFY_SUPPORT
  23. # include "mon_fanotify.h"
  24. #endif
  25. #if BSM_SUPPORT
  26. # include "mon_bsm.h"
  27. # include <bsm/audit_kevents.h>
  28. #endif
  29. #if GIO_SUPPORT
  30. # include <gio/gio.h>
  31. # include "mon_gio.h"
  32. #endif
  33. #include "main.h"
  34. #include "error.h"
  35. #include "fileutils.h"
  36. #include "malloc.h"
  37. #include "cluster.h"
  38. #include "sync.h"
  39. #include "glibex.h"
  40. #include "control.h"
  41. #include "indexes.h"
  42. #include "privileged.h"
  43. #include "rules.h"
  44. #if CGROUP_SUPPORT
  45. # include "cgroup.h"
  46. #endif
  47. #include <stdio.h>
  48. #include <dlfcn.h>
  49. pthread_t pthread_sighandler;
  50. // seqid - is a counter of main loop. But it may overflow and it's required to compare
  51. // seqid-values anyway.
  52. // So if (a-b) is too big, let's assume, that "b<a".
  53. #define SEQID_WINDOW (((unsigned int)~0)>>1)
  54. #define SEQID_EQ(a, b) ((a)==(b))
  55. #define SEQID_GE(a, b) ((a)-(b) < SEQID_WINDOW)
  56. #define SEQID_LE(a, b) ((b)-(a) < SEQID_WINDOW)
  57. #define SEQID_GT(a, b) ((!SEQID_EQ(a, b)) && (SEQID_GE(a, b)))
  58. #define SEQID_LT(a, b) ((!SEQID_EQ(a, b)) && (SEQID_LE(a, b)))
  59. static unsigned int _sync_seqid_value = 0;
  60. static inline unsigned int sync_seqid()
  61. {
  62. return _sync_seqid_value++;
  63. }
  64. static inline void setenv_iteration ( uint32_t iteration_num )
  65. {
  66. char iterations[sizeof ( "4294967296" )]; // 4294967296 == 2**32
  67. sprintf ( iterations, "%i", iteration_num );
  68. setenv ( "CLSYNC_ITERATION", iterations, 1 );
  69. return;
  70. }
/**
 * Advances the main-loop iteration counter at the end of a pass.
 *
 * The counter saturates at its type's maximum value (~0 for an unsigned
 * type) instead of wrapping, so comparisons against flags[MAXITERATIONS]
 * stay meaningful. In builds with THREADING_SUPPORT the environment export
 * is skipped when threading is enabled — presumably because setenv() is not
 * thread-safe; confirm before relying on that.
 */
static inline void finish_iteration ( ctx_t *ctx_p )
{
	if ( ctx_p->iteration_num < ( typeof ( ctx_p->iteration_num ) ) ~0 ) // ~0 is the max value for unsigned variables
		ctx_p->iteration_num++;
#ifdef THREADING_SUPPORT
	// NOTE: this "if" guards only the setenv_iteration() call below;
	// without THREADING_SUPPORT the call is unconditional.
	if ( !ctx_p->flags[THREADING] )
#endif
		setenv_iteration ( ctx_p->iteration_num );
	debug ( 3, "next iteration: %u/%u",
	        ctx_p->iteration_num, ctx_p->flags[MAXITERATIONS] );
	return;
}
  83. gpointer eidup ( gpointer ei_gp )
  84. {
  85. eventinfo_t *ei = ( eventinfo_t * ) ei_gp;
  86. eventinfo_t *ei_dup = ( eventinfo_t * ) xmalloc ( sizeof ( *ei ) );
  87. memcpy ( ei_dup, ei, sizeof ( *ei ) );
  88. return ( gpointer ) ei_dup;
  89. }
/**
 * Merges event info "evinfo_src" into "evinfo_dst" (two queued events for
 * the same object collapsing into one record).
 *
 * The SEQID_LE/SEQID_GE window macros make the comparisons robust against
 * sequence-counter wrap-around: the oldest event decides objtype_old, the
 * newest decides objtype_new.
 */
static inline void evinfo_merge ( ctx_t *ctx_p, eventinfo_t *evinfo_dst, eventinfo_t *evinfo_src )
{
	debug ( 3, "evinfo_dst: seqid_min == %u; seqid_max == %u; objtype_old == %i; objtype_new == %i; \t"
	        "evinfo_src: seqid_min == %u; seqid_max == %u; objtype_old == %i; objtype_new == %i",
	        evinfo_dst->seqid_min, evinfo_dst->seqid_max, evinfo_dst->objtype_old, evinfo_dst->objtype_new,
	        evinfo_src->seqid_min, evinfo_src->seqid_max, evinfo_src->objtype_old, evinfo_src->objtype_new
	      );
	// NOTE(review): the outer test uses "#if KQUEUE_SUPPORT | INOTIFY_SUPPORT"
	// while the inner ones use "#ifdef" — inconsistent if the macros can be
	// defined as 0; confirm against common.h.
#if KQUEUE_SUPPORT | INOTIFY_SUPPORT
	// For kqueue/inotify the masks are OR-ed together — presumably they are
	// bit-flag event masks; confirm against the monitor backends.
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef KQUEUE_SUPPORT
		case NE_KQUEUE:
#endif
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY:
#endif
			evinfo_dst->evmask |= evinfo_src->evmask;
			break;
	}
#endif
	evinfo_dst->flags |= evinfo_src->flags;
	// Older event (smaller seqid within the wrap window) supplies the
	// original object type.
	if ( SEQID_LE ( evinfo_src->seqid_min, evinfo_dst->seqid_min ) ) {
		evinfo_dst->objtype_old = evinfo_src->objtype_old;
		evinfo_dst->seqid_min = evinfo_src->seqid_min;
	}
	// Newer event supplies the resulting object type.
	if ( SEQID_GE ( evinfo_src->seqid_max, evinfo_dst->seqid_max ) ) {
		evinfo_dst->objtype_new = evinfo_src->objtype_new;
		evinfo_dst->seqid_max = evinfo_src->seqid_max;
		// For GIO/BSM the newer mask replaces the old one instead of being
		// OR-ed in — presumably these are event codes, not bit flags; confirm.
		switch ( ctx_p->flags[MONITOR] ) {
#ifdef GIO_SUPPORT
			case NE_GIO:
				evinfo_dst->evmask = evinfo_src->evmask;
				break;
#endif
#ifdef BSM_SUPPORT
			case NE_BSM:
			case NE_BSM_PREFETCH:
				evinfo_dst->evmask = evinfo_src->evmask;
				break;
#endif
			default:
				break;
		}
	}
	return;
}
  135. static inline int _exitcode_process ( ctx_t *ctx_p, int exitcode )
  136. {
  137. if ( ctx_p->isignoredexitcode[ ( unsigned char ) exitcode] )
  138. return 0;
  139. if ( exitcode && ! ( ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) && ( exitcode == 24 ) ) ) {
  140. error ( "Got non-zero exitcode %i from __sync_exec().", exitcode );
  141. return exitcode;
  142. }
  143. return 0;
  144. }
  145. int exitcode_process ( ctx_t *ctx_p, int exitcode )
  146. {
  147. int err = _exitcode_process ( ctx_p, exitcode );
  148. if ( err ) error ( "Got error-report from exitcode_process().\nExitcode is %i, strerror(%i) returns \"%s\". However strerror() is not ensures compliance "
  149. "between exitcode and error description for every utility. So, e.g if you're using rsync, you should look for the error description "
  150. "into rsync's manpage (\"man 1 rsync\"). Also some advices about diagnostics can be found in clsync's manpage (\"man 1 clsync\", see DIAGNOSTICS)",
  151. exitcode, exitcode, strerror ( exitcode ) );
  152. return err;
  153. }
  154. threadsinfo_t *thread_info() // TODO: optimize this
  155. {
  156. static threadsinfo_t threadsinfo = {0};
  157. if ( !threadsinfo.mutex_init ) {
  158. int i = 0;
  159. while ( i < PTHREAD_MUTEX_MAX ) {
  160. if ( pthread_mutex_init ( &threadsinfo.mutex[i], NULL ) ) {
  161. error ( "Cannot pthread_mutex_init()." );
  162. return NULL;
  163. }
  164. if ( pthread_cond_init ( &threadsinfo.cond [i], NULL ) ) {
  165. error ( "Cannot pthread_cond_init()." );
  166. return NULL;
  167. }
  168. i++;
  169. }
  170. threadsinfo.mutex_init++;
  171. }
  172. return &threadsinfo;
  173. }
  174. #ifdef THREADING_SUPPORT
  175. #define thread_info_lock() _thread_info_lock(__func__)
  176. static inline threadsinfo_t *_thread_info_lock ( const char *const function_name )
  177. {
  178. threadsinfo_t *threadsinfo_p = thread_info();
  179. debug ( 4, "used by %s()", function_name );
  180. pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_THREADSINFO] );
  181. return threadsinfo_p;
  182. }
  183. #define thread_info_unlock(...) _thread_info_unlock(__func__, __VA_ARGS__)
  184. static inline int _thread_info_unlock ( const char *const function_name, int rc )
  185. {
  186. threadsinfo_t *threadsinfo_p = thread_info();
  187. debug ( 4, "used by %s()", function_name );
  188. pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_THREADSINFO] );
  189. return rc;
  190. }
  191. int threads_foreach ( int ( *funct ) ( threadinfo_t *, void * ), state_t state, void *arg )
  192. {
  193. int i, rc;
  194. threadsinfo_t *threadsinfo_p = thread_info_lock();
  195. #ifdef PARANOID
  196. if ( threadsinfo_p == NULL )
  197. return thread_info_unlock ( EINVAL );
  198. #endif
  199. rc = 0;
  200. i = 0;
  201. while ( i < threadsinfo_p->used ) {
  202. threadinfo_t *threadinfo_p = &threadsinfo_p->threads[i++];
  203. if ( ( state == STATE_UNKNOWN ) || ( threadinfo_p->state == state ) ) {
  204. if ( ( rc = funct ( threadinfo_p, arg ) ) )
  205. break;
  206. }
  207. }
  208. return thread_info_unlock ( rc );
  209. }
  210. time_t thread_nextexpiretime()
  211. {
  212. time_t nextexpiretime = 0;
  213. threadsinfo_t *threadsinfo_p = thread_info_lock();
  214. #ifdef PARANOID
  215. if ( threadsinfo_p == NULL )
  216. return thread_info_unlock ( 0 );
  217. #endif
  218. int thread_num = threadsinfo_p->used;
  219. while ( thread_num-- ) {
  220. threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
  221. debug ( 3, "threadsinfo_p->threads[%i].state == %i;\tthreadsinfo_p->threads[%i].pthread == %p;\tthreadsinfo_p->threads[%i].expiretime == %i",
  222. thread_num, threadinfo_p->state, thread_num, threadinfo_p->pthread, thread_num, threadinfo_p->expiretime );
  223. if ( threadinfo_p->state == STATE_EXIT )
  224. continue;
  225. if ( threadinfo_p->expiretime ) {
  226. if ( nextexpiretime )
  227. nextexpiretime = MIN ( nextexpiretime, threadinfo_p->expiretime );
  228. else
  229. nextexpiretime = threadinfo_p->expiretime;
  230. }
  231. }
  232. thread_info_unlock ( 0 );
  233. debug ( 3, "nextexpiretime == %i", nextexpiretime );
  234. return nextexpiretime;
  235. }
/**
 * Allocates a threadinfo_t slot and returns it in STATE_RUNNING.
 *
 * Reuses a slot from the free-slot pointer stack when one is available;
 * otherwise appends a new slot, growing both arrays by ALLOC_PORTION as
 * needed. Returns NULL only when the lock cannot be obtained (PARANOID
 * builds). The returned pointer stays owned by the bookkeeping arrays.
 */
threadinfo_t *thread_new()
{
	threadsinfo_t *threadsinfo_p = thread_info_lock();
#ifdef PARANOID
	if ( threadsinfo_p == NULL ) {
		thread_info_unlock ( 0 );
		return NULL;
	}
#endif
	int thread_num;
	threadinfo_t *threadinfo_p;
	if ( threadsinfo_p->stacklen ) {
		// Reuse a previously freed slot from the pointer stack.
		threadinfo_p = threadsinfo_p->threadsstack[--threadsinfo_p->stacklen];
		thread_num = threadinfo_p->thread_num;
	} else {
		if ( threadsinfo_p->used >= threadsinfo_p->allocated ) {
			threadsinfo_p->allocated += ALLOC_PORTION;
			debug ( 2, "Reallocated memory for threadsinfo -> %i.", threadsinfo_p->allocated );
			// NOTE(review): xrealloc() may move the "threads" array, which
			// would dangle any pointers still held in "threadsstack". This
			// branch is only reached when stacklen == 0 (the stack branch
			// above is taken otherwise), so the stack should be empty here —
			// confirm no other code path leaves entries in it.
			threadsinfo_p->threads = ( threadinfo_t * ) xrealloc ( ( char * ) threadsinfo_p->threads,
			                         sizeof ( *threadsinfo_p->threads ) * ( threadsinfo_p->allocated + 2 ) );
			threadsinfo_p->threadsstack = ( threadinfo_t ** ) xrealloc ( ( char * ) threadsinfo_p->threadsstack,
			                              sizeof ( *threadsinfo_p->threadsstack ) * ( threadsinfo_p->allocated + 2 ) );
		}
		thread_num = threadsinfo_p->used++;
		threadinfo_p = &threadsinfo_p->threads[thread_num];
	}
#ifdef PARANOID
	memset ( threadinfo_p, 0, sizeof ( *threadinfo_p ) );
#else
	// Non-PARANOID builds reset only the fields read before being re-set.
	threadinfo_p->expiretime = 0;
	threadinfo_p->errcode = 0;
	threadinfo_p->exitcode = 0;
#endif
	threadinfo_p->thread_num = thread_num;
	threadinfo_p->state = STATE_RUNNING;
	debug ( 2, "thread_new -> thread_num: %i; used: %i", thread_num, threadsinfo_p->used );
	thread_info_unlock ( 0 );
	return threadinfo_p;
}
/**
 * Marks thread slot "thread_num" as finished and releases its resources.
 *
 * Frees the thread's NULL-terminated argv copy, then compacts bookkeeping:
 *   #1 the slot is the last used one               -> just shrink "used";
 *   #2 the last slot is also finished (STATE_EXIT) -> move it into the hole;
 *   #3 otherwise                                   -> push the hole onto the
 *                                                     free-slot stack.
 * Returns 0 on success or an errno-style code.
 */
int thread_del_bynum ( int thread_num )
{
	debug ( 2, "thread_del_bynum(%i)", thread_num );
	threadsinfo_t *threadsinfo_p = thread_info_lock();
#ifdef PARANOID
	if ( threadsinfo_p == NULL )
		return thread_info_unlock ( errno );
#endif
	if ( thread_num >= threadsinfo_p->used )
		return thread_info_unlock ( EINVAL );
	threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
	threadinfo_p->state = STATE_EXIT;
	// Free the argv vector owned by this thread, if any.
	char **ptr = threadinfo_p->argv;
	if ( ptr != NULL ) {
		while ( *ptr )
			free ( * ( ptr++ ) );
		free ( threadinfo_p->argv );
	}
	// Case #1: deleting the last used slot.
	if ( thread_num == ( threadsinfo_p->used - 1 ) ) {
		threadsinfo_p->used--;
		debug ( 3, "thread_del_bynum(%i): there're %i threads left (#0).", thread_num, threadsinfo_p->used - threadsinfo_p->stacklen );
		return thread_info_unlock ( 0 );
	}
	threadinfo_t *t = &threadsinfo_p->threads[threadsinfo_p->used - 1];
	if ( t->state == STATE_EXIT ) {
		// Case #2: the last slot is also finished — move it into the hole.
		// NOTE(review): memcpy copies t's old thread_num field into the new
		// position, leaving it stale — confirm nothing relies on it before
		// the slot is recycled by thread_new().
		threadsinfo_p->used--;
		debug ( 3, "%i [%p] -> %i [%p]; left: %i",
		        threadsinfo_p->used, t->pthread, thread_num, threadinfo_p->pthread, threadsinfo_p->used - threadsinfo_p->stacklen );
		memcpy ( threadinfo_p, t, sizeof ( *threadinfo_p ) );
	} else {
		// Case #3: the last slot is still busy — remember the hole for reuse.
#ifdef PARANOID
		if ( threadsinfo_p->stacklen >= threadsinfo_p->allocated ) {
			error ( "Threads metadata structures pointers stack overflowed!" );
			return thread_info_unlock ( EINVAL );
		}
#endif
		threadsinfo_p->threadsstack[threadsinfo_p->stacklen++] = threadinfo_p;
	}
	debug ( 3, "thread_del_bynum(%i): there're %i threads left (#1).", thread_num, threadsinfo_p->used - threadsinfo_p->stacklen );
	return thread_info_unlock ( 0 );
}
/**
 * Garbage-collects finished worker threads: joins terminated threads and
 * deletes their bookkeeping slots. No-op when threading is disabled.
 *
 * Returns 0 on success; ETIME when a thread outlived its expiretime and is
 * still not joinable; a thread's own errcode if it reported one; or another
 * errno-style code.
 */
int thread_gc ( ctx_t *ctx_p )
{
	int thread_num;
	time_t tm = time ( NULL );
	debug ( 3, "tm == %i; thread %p", tm, pthread_self() );
	if ( !ctx_p->flags[THREADING] )
		return 0;
	threadsinfo_t *threadsinfo_p = thread_info_lock();
#ifdef PARANOID
	if ( threadsinfo_p == NULL )
		return thread_info_unlock ( errno );
#endif
	debug ( 2, "There're %i threads.", threadsinfo_p->used );
	thread_num = -1;
	while ( ++thread_num < threadsinfo_p->used ) {
		int err;
		threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
		debug ( 3, "Trying thread #%i (==%i) (state: %i; expire at: %i, now: %i, exitcode: %i, errcode: %i; i_p: %p; p: %p).",
		        thread_num, threadinfo_p->thread_num, threadinfo_p->state, threadinfo_p->expiretime, tm, threadinfo_p->exitcode,
		        threadinfo_p->errcode, threadinfo_p, threadinfo_p->pthread );
		if ( threadinfo_p->state == STATE_EXIT )
			continue;
		// A thread past its deadline must be joinable by now; if the
		// non-blocking join fails, treat it as hung and bail out with ETIME.
		if ( threadinfo_p->expiretime && ( threadinfo_p->expiretime <= tm ) ) {
			if ( pthread_tryjoin_np ( threadinfo_p->pthread, NULL ) ) { // TODO: check this pthread_tryjoin_np() on error returnings
				error ( "Debug3: thread_gc(): Thread #%i is alive too long: %lu <= %lu (started at %lu)", thread_num, threadinfo_p->expiretime, tm, threadinfo_p->starttime );
				return thread_info_unlock ( ETIME );
			}
		}
#ifndef VERYPARANOID
		// Normal builds only join threads that already announced termination;
		// VERYPARANOID builds instead rely on the non-blocking join below.
		if ( threadinfo_p->state != STATE_TERM ) {
			debug ( 3, "Thread #%i is busy, skipping (#0).", thread_num );
			continue;
		}
#endif
		debug ( 3, "Trying to join thread #%i: %p", thread_num, threadinfo_p->pthread );
		// The switch head differs per build: blocking pthread_join() normally,
		// non-blocking pthread_tryjoin_np() (with an EBUSY skip) when
		// VERYPARANOID is defined.
#ifndef VERYPARANOID
		switch ( ( err = pthread_join ( threadinfo_p->pthread, NULL ) ) ) {
#else
		switch ( ( err = pthread_tryjoin_np ( threadinfo_p->pthread, NULL ) ) ) {
			case EBUSY:
				debug ( 3, "Thread #%i is busy, skipping (#1).", thread_num );
				continue;
#endif
			case EDEADLK:
			case EINVAL:
			case 0:
				debug ( 3, "Thread #%i is finished with exitcode %i (errcode %i), deleting. threadinfo_p == %p",
				        thread_num, threadinfo_p->exitcode, threadinfo_p->errcode, threadinfo_p );
				break;
			default:
				// NOTE(review): the format string contains no conversion
				// specifiers, so the strerror(err)/err arguments are never
				// printed; and the function returns errno although the join
				// error code is in "err" (pthread_join does not set errno).
				// Both look like bugs — confirm before changing.
				error ( "Got error while pthread_join() or pthread_tryjoin_np().", strerror ( err ), err );
				return thread_info_unlock ( errno );
		}
		if ( threadinfo_p->errcode ) {
			error ( "Got error from thread #%i: errcode %i.", thread_num, threadinfo_p->errcode );
			thread_info_unlock ( 0 );
			thread_del_bynum ( thread_num );
			return threadinfo_p->errcode;
		}
		// thread_del_bynum() takes the lock itself, so release it first and
		// re-acquire afterwards.
		thread_info_unlock ( 0 );
		if ( thread_del_bynum ( thread_num ) )
			return errno;
		thread_info_lock();
	}
	debug ( 3, "There're %i threads left.", threadsinfo_p->used - threadsinfo_p->stacklen );
	return thread_info_unlock ( 0 );
}
  383. int thread_cleanup ( ctx_t *ctx_p )
  384. {
  385. ( void ) ctx_p;
  386. debug ( 3, "" );
  387. threadsinfo_t *threadsinfo_p = thread_info_lock();
  388. #ifdef PARANOID
  389. if ( threadsinfo_p == NULL )
  390. return thread_info_unlock ( errno );
  391. #endif
  392. // Waiting for threads:
  393. debug ( 1, "There're %i opened threads. Waiting.", threadsinfo_p->used );
  394. while ( threadsinfo_p->used ) {
  395. // int err;
  396. threadinfo_t *threadinfo_p = &threadsinfo_p->threads[--threadsinfo_p->used];
  397. if ( threadinfo_p->state == STATE_EXIT )
  398. continue;
  399. //pthread_kill(threadinfo_p->pthread, SIGTERM);
  400. debug ( 1, "killing pid %i with SIGTERM", threadinfo_p->child_pid );
  401. kill ( threadinfo_p->child_pid, SIGTERM );
  402. pthread_join ( threadinfo_p->pthread, NULL );
  403. debug ( 2, "thread #%i exitcode: %i", threadsinfo_p->used, threadinfo_p->exitcode );
  404. /*
  405. if(threadinfo_p->callback)
  406. if((err=threadinfo_p->callback(ctx_p, threadinfo_p->argv)))
  407. warning("Got error from callback function.", strerror(err), err);
  408. */
  409. char **ptr = threadinfo_p->argv;
  410. while ( *ptr )
  411. free ( * ( ptr++ ) );
  412. free ( threadinfo_p->argv );
  413. }
  414. debug ( 3, "All threads are closed." );
  415. // Freeing
  416. if ( threadsinfo_p->allocated ) {
  417. free ( threadsinfo_p->threads );
  418. free ( threadsinfo_p->threadsstack );
  419. }
  420. if ( threadsinfo_p->mutex_init ) {
  421. int i = 0;
  422. while ( i < PTHREAD_MUTEX_MAX ) {
  423. pthread_mutex_destroy ( &threadsinfo_p->mutex[i] );
  424. pthread_cond_destroy ( &threadsinfo_p->cond [i] );
  425. i++;
  426. }
  427. }
  428. #ifdef PARANOID
  429. // Reseting
  430. memset ( threadsinfo_p, 0, sizeof ( *threadsinfo_p ) ); // Just in case;
  431. #endif
  432. debug ( 3, "done." );
  433. return thread_info_unlock ( 0 );
  434. }
  435. #endif
// Global pointer to the daemon state, shared with the signal handler.
volatile state_t *state_p = NULL;
// Global exit code; worker threads set it on fatal errors before SIGTERM-ing.
volatile int exitcode = 0;
// Whether syncs should run in worker threads: threading must not be
// disabled, and in "safe" mode the very first iteration stays unthreaded.
#define SHOULD_THREAD(ctx_p) ((ctx_p->flags[THREADING] != PM_OFF) && (ctx_p->flags[THREADING] != PM_SAFE || ctx_p->iteration_num))
  439. int exec_argv ( char **argv, int *child_pid )
  440. {
  441. debug ( 3, "Thread %p.", pthread_self() );
  442. pid_t pid;
  443. int status;
  444. // Forking
  445. pid = privileged_fork_execvp ( argv[0], ( char *const * ) argv );
  446. // debug(3, "After fork thread %p"")".", pthread_self() );
  447. debug ( 3, "Child pid is %u", pid );
  448. // Setting *child_pid value
  449. if ( child_pid )
  450. *child_pid = pid;
  451. // Waiting for process end
  452. #ifdef VERYPARANOID
  453. sigset_t sigset_exec, sigset_old;
  454. sigemptyset ( &sigset_exec );
  455. sigaddset ( &sigset_exec, SIGUSR_BLOPINT );
  456. pthread_sigmask ( SIG_BLOCK, &sigset_exec, &sigset_old );
  457. #endif
  458. // debug(3, "Pre-wait thread %p"")".", pthread_self() );
  459. if ( privileged_waitpid ( pid, &status, 0 ) != pid ) {
  460. switch ( errno ) {
  461. case ECHILD:
  462. debug ( 2, "Child %u is already dead.", pid );
  463. break;
  464. default:
  465. error ( "Cannot waitid()." );
  466. return errno;
  467. }
  468. }
  469. // debug(3, "After-wait thread %p"")".", pthread_self() );
  470. #ifdef VERYPARANOID
  471. pthread_sigmask ( SIG_SETMASK, &sigset_old, NULL );
  472. #endif
  473. // Return
  474. int exitcode = WEXITSTATUS ( status );
  475. debug ( 3, "execution completed with exitcode %i", exitcode );
  476. return exitcode;
  477. }
  478. #ifdef THREADING_SUPPORT
// Finishes a worker thread: stores its exit code, runs the optional
// callback, marks the thread STATE_TERM and signals the handler thread to
// garbage-collect it. Must be called from the worker thread itself.
// Returns 0 on success, or an errno-style code from the sanity check or
// pthread_kill().
static inline int thread_exit ( threadinfo_t *threadinfo_p, int exitcode )
{
	int err = 0;
	threadinfo_p->exitcode = exitcode;
#if _DEBUG_FORCE | VERYPARANOID
	// Sanity check: this function must run on the thread it describes.
	if ( threadinfo_p->pthread != pthread_self() ) {
		error ( "pthread id mismatch! (i_p->p) %p != (p) %p""", threadinfo_p->pthread, pthread_self() );
		return EINVAL;
	}
#endif
	if ( threadinfo_p->callback ) {
		// At high debug levels, dump the argv of the finishing thread.
		if ( threadinfo_p->ctx_p->flags[DEBUG] > 2 ) {
			debug ( 3, "thread %p, argv: ", threadinfo_p->pthread );
			char **argv = threadinfo_p->argv;
			while ( *argv ) {
				debug ( 3, "\t%p == %s", *argv, *argv );
				argv++;
			}
		}
		if ( ( err = threadinfo_p->callback ( threadinfo_p->ctx_p, threadinfo_p->callback_arg ) ) ) {
			error ( "Got error from callback function.", strerror ( err ), err );
			threadinfo_p->errcode = err;
		}
	}
	// Notifying the parent-thread, that it's time to collect garbage threads
	threadinfo_p->state = STATE_TERM;
	debug ( 3, "thread %p is sending signal to sighandler to call GC", threadinfo_p->pthread );
	return pthread_kill ( pthread_sighandler, SIGUSR_THREAD_GC );
}
  508. #endif
  509. static inline void so_call_sync_finished ( int n, api_eventinfo_t *ei )
  510. {
  511. int i = 0;
  512. api_eventinfo_t *ei_i = ei;
  513. while ( i < n ) {
  514. #ifdef PARANOID
  515. if ( ei_i->path == NULL ) {
  516. warning ( "ei_i->path == NULL" );
  517. i++;
  518. continue;
  519. }
  520. #endif
  521. free ( ( char * ) ei_i->path );
  522. ei_i++;
  523. i++;
  524. }
  525. if ( ei != NULL )
  526. free ( ei );
  527. return;
  528. }
  529. #ifdef THREADING_SUPPORT
  530. int so_call_sync_thread ( threadinfo_t *threadinfo_p )
  531. {
  532. debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p",
  533. threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
  534. ctx_t *ctx_p = threadinfo_p->ctx_p;
  535. int n = threadinfo_p->n;
  536. api_eventinfo_t *ei = threadinfo_p->ei;
  537. int err = 0, rc = 0, try_again = 0;
  538. do {
  539. try_again = 0;
  540. threadinfo_p->try_n++;
  541. rc = ctx_p->handler_funct.sync ( n, ei );
  542. if ( ( err = exitcode_process ( threadinfo_p->ctx_p, rc ) ) ) {
  543. try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
  544. warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );
  545. if ( try_again ) {
  546. debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
  547. sleep ( ctx_p->syncdelay );
  548. }
  549. }
  550. } while ( err && ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) );
  551. if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
  552. error ( "Bad exitcode %i (errcode %i)", rc, err );
  553. threadinfo_p->errcode = err;
  554. }
  555. so_call_sync_finished ( n, ei );
  556. if ( ( err = thread_exit ( threadinfo_p, rc ) ) ) {
  557. exitcode = err; // This's global variable "exitcode"
  558. pthread_kill ( pthread_sighandler, SIGTERM );
  559. }
  560. return rc;
  561. }
  562. #endif
// Runs the "sync" handler from the loaded shared object for "n" events "ei".
// In non-threaded mode the handler is called inline, with retries per
// ctx_p->retries and an alarm()-based timeout per attempt; otherwise a
// worker thread (so_call_sync_thread) takes ownership of "ei".
// Returns 0 on success or an error code.
static inline int so_call_sync ( ctx_t *ctx_p, indexes_t *indexes_p, int n, api_eventinfo_t *ei )
{
	debug ( 2, "n == %i", n );
#ifdef THREADING_SUPPORT
	if ( !SHOULD_THREAD ( ctx_p ) ) {
#endif
		int rc = 0, ret = 0, err = 0;
		int try_n = 0, try_again;
		state_t status = STATE_UNKNOWN;
		// indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
		// Expose the current event table to the handler without copying it.
		indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;
		do {
			try_again = 0;
			try_n++;
			// Bound the handler's run time; SIGALRM interrupts a hung handler.
			alarm ( ctx_p->synctimeout );
			rc = ctx_p->handler_funct.sync ( n, ei );
			alarm ( 0 );
			if ( ( err = exitcode_process ( ctx_p, rc ) ) ) {
				// On the first failure, remember the state and publish the error status.
				if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
					status = ctx_p->state;
					ctx_p->state = STATE_SYNCHANDLER_ERR;
					main_status_update ( ctx_p );
				}
				// Retry unless retries are exhausted or we're terminating.
				try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
				warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );
				if ( try_again ) {
					debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
					sleep ( ctx_p->syncdelay );
				}
			}
		} while ( err && ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) );
		if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
			error ( "Bad exitcode %i (errcode %i)", rc, err );
			ret = err;
		} else if ( status != STATE_UNKNOWN ) {
			// Restore the state saved before the first failed attempt.
			ctx_p->state = status;
			main_status_update ( ctx_p );
		}
		// g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
		indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
		so_call_sync_finished ( n, ei );
		return ret;
#ifdef THREADING_SUPPORT
	}
	// Threaded mode: hand the events over to a new worker thread.
	threadinfo_t *threadinfo_p = thread_new();
	if ( threadinfo_p == NULL )
		return errno;
	threadinfo_p->try_n = 0;
	threadinfo_p->callback = NULL;
	threadinfo_p->argv = NULL;
	threadinfo_p->ctx_p = ctx_p;
	threadinfo_p->starttime = time ( NULL );
	// The worker gets its own deep copy of the event table.
	threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
	threadinfo_p->n = n;
	threadinfo_p->ei = ei;
	threadinfo_p->iteration = ctx_p->iteration_num;
	if ( ctx_p->synctimeout )
		threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;
	if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) so_call_sync_thread, threadinfo_p ) ) {
		error ( "Cannot pthread_create()." );
		return errno;
	}
	debug ( 3, "thread %p", threadinfo_p->pthread );
	return 0;
#endif
}
  629. static inline int so_call_rsync_finished ( ctx_t *ctx_p, const char *inclistfile, const char *exclistfile )
  630. {
  631. int ret0, ret1;
  632. debug ( 5, "" );
  633. if ( ctx_p->flags[DONTUNLINK] )
  634. return 0;
  635. if ( inclistfile == NULL ) {
  636. error ( "inclistfile == NULL." );
  637. return EINVAL;
  638. }
  639. debug ( 3, "unlink()-ing \"%s\"", inclistfile );
  640. ret0 = unlink ( inclistfile );
  641. if ( ctx_p->flags[RSYNCPREFERINCLUDE] )
  642. return ret0;
  643. if ( exclistfile == NULL ) {
  644. error ( "exclistfile == NULL." );
  645. return EINVAL;
  646. }
  647. debug ( 3, "unlink()-ing \"%s\"", exclistfile );
  648. ret1 = unlink ( exclistfile );
  649. return ret0 == 0 ? ret1 : ret0;
  650. }
  651. #ifdef THREADING_SUPPORT
// Thread entry point for the shared-object "rsync" handler.
// argv[0]/argv[1] are the include/exclude list file paths, owned by this
// thread. Retries per ctx_p->retries, removes the list files afterwards
// and terminates via thread_exit(); failures of the cleanup or of
// thread_exit() shut the whole daemon down. Returns the handler's last
// exit code.
int so_call_rsync_thread ( threadinfo_t *threadinfo_p )
{
	debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p",
	        threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
	ctx_t *ctx_p = threadinfo_p->ctx_p;
	char **argv = threadinfo_p->argv;
	int err = 0, rc = 0, try_again;
	do {
		try_again = 0;
		threadinfo_p->try_n++;
		rc = ctx_p->handler_funct.rsync ( argv[0], argv[1] );
		if ( ( err = exitcode_process ( threadinfo_p->ctx_p, rc ) ) ) {
			// Retry unless retries are exhausted or we're shutting down.
			try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );
			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( try_again );
	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", rc, err );
		threadinfo_p->errcode = err;
	}
	// Remove the list files; a cleanup failure is fatal for the daemon.
	if ( ( err = so_call_rsync_finished ( ctx_p, argv[0], argv[1] ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}
	// NOTE(review): threadinfo_p->argv keeps pointing at this freed array —
	// confirm thread_gc()/thread_cleanup() never walk it after STATE_TERM.
	free ( argv[0] );
	free ( argv[1] );
	free ( argv );
	if ( ( err = thread_exit ( threadinfo_p, rc ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}
	return rc;
}
  689. #endif
  690. static inline int so_call_rsync ( ctx_t *ctx_p, indexes_t *indexes_p, const char *inclistfile, const char *exclistfile )
  691. {
  692. debug ( 2, "inclistfile == \"%s\"; exclistfile == \"%s\"", inclistfile, exclistfile );
  693. #ifdef THREADING_SUPPORT
  694. if ( !SHOULD_THREAD ( ctx_p ) ) {
  695. #endif
  696. debug ( 3, "ctx_p->handler_funct.rsync == %p", ctx_p->handler_funct.rsync );
  697. // indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
  698. indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;
  699. int rc = 0, err = 0;
  700. int try_n = 0, try_again;
  701. state_t status = STATE_UNKNOWN;
  702. do {
  703. try_again = 0;
  704. try_n++;
  705. alarm ( ctx_p->synctimeout );
  706. rc = ctx_p->handler_funct.rsync ( inclistfile, exclistfile );
  707. alarm ( 0 );
  708. if ( ( err = exitcode_process ( ctx_p, rc ) ) ) {
  709. if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
  710. status = ctx_p->state;
  711. ctx_p->state = STATE_SYNCHANDLER_ERR;
  712. main_status_update ( ctx_p );
  713. }
  714. try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
  715. warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );
  716. if ( try_again ) {
  717. debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
  718. sleep ( ctx_p->syncdelay );
  719. }
  720. }
  721. } while ( try_again );
  722. if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
  723. error ( "Bad exitcode %i (errcode %i)", rc, err );
  724. rc = err;
  725. } else if ( ( status != STATE_UNKNOWN ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
  726. ctx_p->state = status;
  727. main_status_update ( ctx_p );
  728. }
  729. // g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
  730. indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
  731. int ret_cleanup;
  732. if ( ( ret_cleanup = so_call_rsync_finished ( ctx_p, inclistfile, exclistfile ) ) )
  733. return rc ? rc : ret_cleanup;
  734. return rc;
  735. #ifdef THREADING_SUPPORT
  736. }
  737. threadinfo_t *threadinfo_p = thread_new();
  738. if ( threadinfo_p == NULL )
  739. return errno;
  740. threadinfo_p->try_n = 0;
  741. threadinfo_p->callback = NULL;
  742. threadinfo_p->argv = xmalloc ( sizeof ( char * ) * 3 );
  743. threadinfo_p->ctx_p = ctx_p;
  744. threadinfo_p->starttime = time ( NULL );
  745. threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
  746. threadinfo_p->iteration = ctx_p->iteration_num;
  747. threadinfo_p->argv[0] = strdup ( inclistfile );
  748. threadinfo_p->argv[1] = strdup ( exclistfile );
  749. if ( ctx_p->synctimeout )
  750. threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;
  751. if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) so_call_rsync_thread, threadinfo_p ) ) {
  752. error ( "Cannot pthread_create()." );
  753. return errno;
  754. }
  755. debug ( 3, "thread %p", threadinfo_p->pthread );
  756. return 0;
  757. #endif
  758. }
// === SYNC_EXEC() === {
//#define SYNC_EXEC(...) (SHOULD_THREAD(ctx_p) ? sync_exec_thread : sync_exec )(__VA_ARGS__)
// Dispatch an exec-mode sync either to a worker thread or inline, depending
// on SHOULD_THREAD(); without THREADING_SUPPORT it is always inline.
#ifdef THREADING_SUPPORT
#define SYNC_EXEC_ARGV(...) (SHOULD_THREAD(ctx_p) ? sync_exec_argv_thread : sync_exec_argv)(__VA_ARGS__)
#else
#define SYNC_EXEC_ARGV(...) sync_exec_argv(__VA_ARGS__)
#endif
// Dump an argv[] via argv_dump() when the debug level is high enough.
// NOTE(review): expands to a bare "if" (no do{}while(0)) — beware of
// dangling-else at call sites; also uses "ctx_p" from the caller's scope.
#define debug_argv_dump(level, argv)\
if (unlikely(ctx_p->flags[DEBUG] >= level))\
argv_dump(level, argv)
  769. static inline void argv_dump ( int debug_level, char **argv )
  770. {
  771. #ifdef _DEBUG_FORCE
  772. debug ( 19, "(%u, %p)", debug_level, argv );
  773. #endif
  774. char **argv_p = argv;
  775. while ( *argv_p != NULL ) {
  776. debug ( debug_level, "%p: \"%s\"", *argv_p, *argv_p );
  777. argv_p++;
  778. }
  779. return;
  780. }
/*
 * Collects the variadic arguments following "firstarg" into argv[],
 * applying COPYARG to each non-NULL argument (e.g. "arg" to borrow, or
 * "strdup(arg)" to copy). Stops at the NULL sentinel; returns ENOMEM from
 * the enclosing function when MAXARGUMENTS is exceeded.
 */
#define _sync_exec_getargv(argv, firstarg, COPYARG) {\
va_list arglist;\
va_start(arglist, firstarg);\
\
int i = 0;\
do {\
char *arg;\
if(i >= MAXARGUMENTS) {\
error("Too many arguments (%i >= %i).", i, MAXARGUMENTS);\
return ENOMEM;\
}\
arg = (char *)va_arg(arglist, const char *const);\
argv[i] = arg!=NULL ? COPYARG : NULL;\
} while(argv[i++] != NULL);\
va_end(arglist);\
}
// Converts a watchdir-relative path to an absolute one.
// path_rel_len == -1 means "compute with strlen()". Reuses path_abs_oldptr
// when it is large enough (per *path_abs_len_p), otherwise xrealloc()'s it.
// Returns the (possibly new) buffer; *path_abs_len_p (if non-NULL) receives
// the resulting length.
// NOTE(review): passing path_abs_oldptr == NULL together with a non-NULL
// *path_abs_len_p larger than the needed length would skip the allocation —
// presumably callers always pass *path_abs_len_p == 0 with a NULL buffer.
char *sync_path_rel2abs ( ctx_t *ctx_p, const char *path_rel, ssize_t path_rel_len, size_t *path_abs_len_p, char *path_abs_oldptr )
{
	if ( path_rel == NULL )
		return NULL;
	if ( path_rel_len == -1 )
		path_rel_len = strlen ( path_rel );
	char *path_abs;
	size_t watchdirlen =
	    ( ctx_p->watchdir == ctx_p->watchdirwslash ) ? 0 : ctx_p->watchdirlen;
	// if [watchdir == "/"] ? 0 : watchdir.length()
	size_t path_abs_len = path_rel_len + watchdirlen + 1;
	path_abs = ( path_abs_len_p == NULL || path_abs_len >= *path_abs_len_p ) ?
	           xrealloc ( path_abs_oldptr, path_abs_len + 1 ) :
	           path_abs_oldptr;
	// The "<watchdir>/" prefix only needs writing into a fresh buffer;
	// on reuse (or realloc) the previously-written prefix is preserved.
	if ( path_abs_oldptr == NULL ) {
		memcpy ( path_abs, ctx_p->watchdir, watchdirlen );
		path_abs[watchdirlen] = '/';
	}
	memcpy ( &path_abs[watchdirlen + 1], path_rel, path_rel_len + 1 );
	if ( path_abs_len_p != NULL )
		*path_abs_len_p = path_abs_len;
	return path_abs;
}
// Converts an absolute path inside the watchdir to a watchdir-relative one.
// path_abs_len == -1 means "compute with strlen()". Reuses path_rel_oldptr
// when it is large enough (per *path_rel_len_p), otherwise xrealloc()'s it.
// Returns the (possibly new) buffer; *path_rel_len_p (if non-NULL) receives
// the resulting length.
char *sync_path_abs2rel ( ctx_t *ctx_p, const char *path_abs, ssize_t path_abs_len, size_t *path_rel_len_p, char *path_rel_oldptr )
{
	if ( path_abs == NULL )
		return NULL;
	if ( path_abs_len == -1 )
		path_abs_len = strlen ( path_abs );
	size_t path_rel_len;
	char *path_rel;
	size_t watchdirlen =
	    ( ctx_p->watchdir == ctx_p->watchdirwslash ) ? 0 : ctx_p->watchdirlen;
	signed long path_rel_len_signed = path_abs_len - ( watchdirlen + 1 );
	path_rel_len = ( path_rel_len_signed > 0 ) ? path_rel_len_signed : 0;
	path_rel = ( path_rel_len_p == NULL || path_rel_len >= *path_rel_len_p ) ?
	           xrealloc ( path_rel_oldptr, path_rel_len + 1 ) :
	           path_rel_oldptr;
	// The path is the watchdir itself (or shorter): relative path is "".
	// NOTE(review): this early return leaves *path_rel_len_p untouched —
	// confirm callers don't rely on it being updated to 0 here.
	if ( !path_rel_len ) {
		path_rel[0] = 0;
		return path_rel;
	}
	memcpy ( path_rel, &path_abs[watchdirlen + 1], path_rel_len + 1 );
#ifdef VERYPARANOID
	// Removing "/" on the end
	debug ( 3, "\"%s\" (len: %i) --%i--> \"%s\" (len: %i) + ",
	        path_abs, path_abs_len, path_rel[path_rel_len - 1] == '/',
	        ctx_p->watchdirwslash, watchdirlen + 1 );
	if ( path_rel[path_rel_len - 1] == '/' )
		path_rel[--path_rel_len] = 0x00;
	debug ( 3, "\"%s\" (len: %i)", path_rel, path_rel_len );
#endif
	if ( path_rel_len_p != NULL )
		*path_rel_len_p = path_rel_len;
	return path_rel;
}
  853. pid_t clsyncapi_fork ( ctx_t *ctx_p )
  854. {
  855. // if(ctx_p->flags[THREADING])
  856. // return fork();
  857. // Cleaning stale pids. TODO: Optimize this. Remove this GC.
  858. int i = 0;
  859. while ( i < ctx_p->children ) {
  860. if ( privileged_waitpid ( ctx_p->child_pid[i], NULL, WNOHANG ) < 0 )
  861. if ( errno == ECHILD )
  862. ctx_p->child_pid[i] = ctx_p->child_pid[--ctx_p->children];
  863. i++;
  864. }
  865. // Too many children
  866. if ( ctx_p->children >= MAXCHILDREN ) {
  867. errno = ECANCELED;
  868. return -1;
  869. }
  870. // Forking
  871. pid_t pid = fork();
  872. ctx_p->child_pid[ctx_p->children++] = pid;
  873. return pid;
  874. }
// Executes "argv" synchronously (non-threaded mode): retries per
// ctx_p->retries with an alarm()-based timeout around each attempt,
// publishes/restores the daemon status and finally runs the optional
// callback. Returns 0 on success or the first relevant error code.
int sync_exec_argv ( ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, char **argv )
{
	debug ( 2, "" );
	debug_argv_dump ( 2, argv );
	// indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
	// Expose the current event table to the handler without copying it.
	indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;
	int exitcode = 0, ret = 0, err = 0;
	int try_n = 0, try_again;
	state_t status = STATE_UNKNOWN;
	do {
		try_again = 0;
		try_n++;
		debug ( 2, "try_n == %u (retries == %u)", try_n, ctx_p->retries );
		// Bound the run time; SIGALRM interrupts a hung child.
		alarm ( ctx_p->synctimeout );
		// Register the child with the signal handler while it runs.
		ctx_p->children = 1;
		exitcode = exec_argv ( argv, ctx_p->child_pid );
		ctx_p->children = 0;
		alarm ( 0 );
		if ( ( err = exitcode_process ( ctx_p, exitcode ) ) ) {
			// On the first failure, remember the state to restore later.
			if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
				status = ctx_p->state;
				ctx_p->state = STATE_SYNCHANDLER_ERR;
				main_status_update ( ctx_p );
			}
			// Retry unless retries are exhausted or we're terminating.
			try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "Bad exitcode %i (errcode %i). %s.", exitcode, err, try_again ? "Retrying" : "Give up" );
			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( try_again );
	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", exitcode, err );
		ret = err;
	} else if ( ( status != STATE_UNKNOWN ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
		// Restore the status saved before the first failed attempt.
		ctx_p->state = status;
		main_status_update ( ctx_p );
	}
	if ( callback != NULL ) {
		int nret = callback ( ctx_p, callback_arg_p );
		if ( nret ) {
			error ( "Got error while callback()." );
			// Keep the first error; callback errors don't mask sync errors.
			if ( !ret ) ret = nret;
		}
	}
	// g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
	indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
	return ret;
}
  925. /*
  926. static inline int sync_exec(ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, ...) {
  927. int rc;
  928. debug(2, "");
  929. char **argv = (char **)xcalloc(sizeof(char *), MAXARGUMENTS);
  930. memset(argv, 0, sizeof(char *)*MAXARGUMENTS);
  931. _sync_exec_getargv(argv, callback_arg_p, arg);
  932. rc = sync_exec_argv(ctx_p, indexes_p, callback, callback_arg_p, argv);
  933. free(argv);
  934. return rc;
  935. }
  936. */
  937. #ifdef THREADING_SUPPORT
// Thread entry point for exec-mode syncs (see sync_exec_argv_thread()).
// Runs exec_argv() with the retry policy, records the error code on final
// failure, destroys the thread's private event table and terminates via
// thread_exit(); a thread_exit() failure shuts the whole daemon down.
// Returns the child's last exit code.
int __sync_exec_thread ( threadinfo_t *threadinfo_p )
{
	char **argv = threadinfo_p->argv;
	ctx_t *ctx_p = threadinfo_p->ctx_p;
	debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p""",
	        threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
	int err = 0, exec_exitcode = 0, try_again;
	do {
		try_again = 0;
		threadinfo_p->try_n++;
		exec_exitcode = exec_argv ( argv, &threadinfo_p->child_pid );
		if ( ( err = exitcode_process ( threadinfo_p->ctx_p, exec_exitcode ) ) ) {
			// Retry unless retries are exhausted or we're shutting down.
			try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "__sync_exec_thread(): Bad exitcode %i (errcode %i). %s.", exec_exitcode, err, try_again ? "Retrying" : "Give up" );
			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( try_again );
	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", exec_exitcode, err );
		threadinfo_p->errcode = err;
	}
	// This thread owns its copy of the event table; release it.
	g_hash_table_destroy ( threadinfo_p->fpath2ei_ht );
	if ( ( err = thread_exit ( threadinfo_p, exec_exitcode ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}
	debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p""; errcode %i",
	        threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self(), threadinfo_p->errcode );
	return exec_exitcode;
}
  971. static inline int sync_exec_argv_thread ( ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, char **argv )
  972. {
  973. debug ( 2, "" );
  974. debug_argv_dump ( 2, argv );
  975. threadinfo_t *threadinfo_p = thread_new();
  976. if ( threadinfo_p == NULL )
  977. return errno;
  978. threadinfo_p->try_n = 0;
  979. threadinfo_p->callback = callback;
  980. threadinfo_p->callback_arg = callback_arg_p;
  981. threadinfo_p->argv = argv;
  982. threadinfo_p->ctx_p = ctx_p;
  983. threadinfo_p->starttime = time ( NULL );
  984. threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
  985. threadinfo_p->iteration = ctx_p->iteration_num;
  986. if ( ctx_p->synctimeout )
  987. threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;
  988. if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) __sync_exec_thread, threadinfo_p ) ) {
  989. error ( "Cannot pthread_create()." );
  990. return errno;
  991. }
  992. debug ( 3, "thread %p", threadinfo_p->pthread );
  993. return 0;
  994. }
  995. /*
  996. static inline int sync_exec_thread(ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, ...) {
  997. debug(2, "");
  998. char **argv = (char **)xcalloc(sizeof(char *), MAXARGUMENTS);
  999. memset(argv, 0, sizeof(char *)*MAXARGUMENTS);
  1000. _sync_exec_getargv(argv, callback_arg_p, strdup(arg));
  1001. return sync_exec_argv_thread(ctx_p, indexes_p, callback, callback_arg_p, argv);
  1002. }
  1003. */
  1004. #endif
  1005. // } === SYNC_EXEC() ===
// Queues one filesystem event for later synchronization.
// QUEUE_AUTO is resolved to QUEUE_BIGFILE/QUEUE_NORMAL by file size; if the
// path is already queued, the new event is merged into the stored one.
// Returns 0 on success (or skip) or an error from indexes_queueevent().
static int sync_queuesync ( const char *fpath_rel, eventinfo_t *evinfo, ctx_t *ctx_p, indexes_t *indexes_p, queue_id_t queue_id )
{
	debug ( 3, "sync_queuesync(\"%s\", ...): fsize == %lu; tres == %lu, queue_id == %u", fpath_rel, evinfo->fsize, ctx_p->bfilethreshold, queue_id );
	if ( queue_id == QUEUE_AUTO )
		queue_id = ( evinfo->fsize > ctx_p->bfilethreshold ) ? QUEUE_BIGFILE : QUEUE_NORMAL;
	queueinfo_t *queueinfo = &ctx_p->_queues[queue_id];
	// Remember when this queue started collecting events.
	if ( !queueinfo->stime )
		queueinfo->stime = time ( NULL );
	// char *fpath_rel = sync_path_abs2rel(ctx_p, fpath, -1, NULL, NULL);
	// Filename can contain a "\n" character that conflicts with the event-row separator of list-files.
	if ( strchr ( fpath_rel, '\n' ) ) {
		// At the moment, we will just ignore events of such files :(
		debug ( 3, "There's \"\\n\" character in path \"%s\". Ignoring it :(. Feedback to: https://github.com/clsync/clsync/issues/12", fpath_rel );
		return 0;
	}
#ifdef CLUSTER_SUPPORT
	if ( ctx_p->cluster_iface )
		cluster_capture ( fpath_rel );
#endif
	eventinfo_t *evinfo_q = indexes_lookupinqueue ( indexes_p, fpath_rel, queue_id );
	if ( evinfo_q == NULL ) {
		// Not queued yet: store a private copy of the event info.
		eventinfo_t *evinfo_dup = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo_dup ) );
		memcpy ( evinfo_dup, evinfo, sizeof ( *evinfo_dup ) );
		return indexes_queueevent ( indexes_p, strdup ( fpath_rel ), evinfo_dup, queue_id );
	} else {
		// Already queued: merge the new event into the stored one.
		evinfo_merge ( ctx_p, evinfo_q, evinfo );
	}
	return 0;
}
// Sets the monitor-specific "initial event" mask on evinfo_p — the mask
// used for objects discovered during the initial sync walk (as opposed to
// events reported by the monitor itself). "isdir" selects the directory
// flavour of the mask where the backend distinguishes one.
static inline void evinfo_initialevmask ( ctx_t *ctx_p, eventinfo_t *evinfo_p, int isdir )
{
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef FANOTIFY_SUPPORT
		case NE_FANOTIFY:
			critical ( "fanotify is not supported" );
			break;
#endif
#if INOTIFY_SUPPORT | KQUEUE_SUPPORT
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY:
#endif
#ifdef KQUEUE_SUPPORT
		case NE_KQUEUE:
#endif
			// inotify and kqueue share the IN_* mask representation here.
			evinfo_p->evmask = IN_CREATE_SELF;
			if ( isdir )
				evinfo_p->evmask |= IN_ISDIR;
			break;
#endif
#ifdef BSM_SUPPORT
		case NE_BSM:
		case NE_BSM_PREFETCH:
			evinfo_p->evmask = ( isdir ? AUE_MKDIR : AUE_OPEN_RWC );
			break;
#endif
#ifdef GIO_SUPPORT
		case NE_GIO:
			evinfo_p->evmask = G_FILE_MONITOR_EVENT_CREATED;
			break;
#endif
#ifdef VERYPARANOID
		default:
			critical ( "Unknown monitor subsystem: %u", ctx_p->flags[MONITOR] );
#endif
	}
	return;
}
  1073. static inline void api_evinfo_initialevmask ( ctx_t *ctx_p, api_eventinfo_t *evinfo_p, int isdir )
  1074. {
  1075. eventinfo_t evinfo = {0};
  1076. evinfo_initialevmask ( ctx_p, &evinfo, isdir );
  1077. evinfo_p->evmask = evinfo.evmask;
  1078. return;
  1079. }
  1080. int sync_dosync ( const char *fpath, uint32_t evmask, ctx_t *ctx_p, indexes_t *indexes_p );
  1081. int sync_initialsync_walk ( ctx_t *ctx_p, const char *dirpath, indexes_t *indexes_p, queue_id_t queue_id, initsync_t initsync )
  1082. {
  1083. int ret = 0;
  1084. const char *rootpaths[] = {dirpath, NULL};
  1085. eventinfo_t evinfo;
  1086. FTS *tree;
  1087. rule_t *rules_p = ctx_p->rules;
  1088. debug ( 2, "(ctx_p, \"%s\", indexes_p, %i, %i).", dirpath, queue_id, initsync );
  1089. char skip_rules = ( initsync == INITSYNC_FULL ) && ctx_p->flags[INITFULL];
  1090. char rsync_and_prefer_excludes =
  1091. (
  1092. ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
  1093. ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
  1094. ( ctx_p->flags[MODE] == MODE_RSYNCSO )
  1095. ) &&
  1096. !ctx_p->flags[RSYNCPREFERINCLUDE];
  1097. if ( ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) && skip_rules )
  1098. return 0;
  1099. skip_rules |= ( ctx_p->rules_count == 0 );
  1100. char fts_no_stat =
  1101. (
  1102. (
  1103. initsync == INITSYNC_FULL
  1104. ) ||
  1105. (
  1106. ctx_p->_queues[QUEUE_NORMAL ].collectdelay ==
  1107. ctx_p->_queues[QUEUE_BIGFILE].collectdelay
  1108. ) ||
  1109. (
  1110. ctx_p->bfilethreshold == 0
  1111. )
  1112. ) && ! ( ctx_p->flags[EXCLUDEMOUNTPOINTS] );
  1113. int fts_opts = FTS_NOCHDIR | FTS_PHYSICAL |
  1114. ( fts_no_stat ? FTS_NOSTAT : 0 ) |
  1115. ( ctx_p->flags[ONEFILESYSTEM] ? FTS_XDEV : 0 );
  1116. debug ( 3, "fts_opts == %p", ( void * ) ( long ) fts_opts );
  1117. tree = privileged_fts_open ( ( char *const * ) &rootpaths, fts_opts, NULL, PC_SYNC_INIIALSYNC_WALK_FTS_OPEN );
  1118. if ( tree == NULL ) {
  1119. error ( "Cannot privileged_fts_open() on \"%s\".", dirpath );
  1120. return errno;
  1121. }
  1122. memset ( &evinfo, 0, sizeof ( evinfo ) );
  1123. FTSENT *node;
  1124. char *path_rel = NULL;
  1125. size_t path_rel_len = 0;
  1126. #ifdef VERYPARANOID
  1127. errno = 0;
  1128. #endif
  1129. while ( ( node = privileged_fts_read ( tree, PC_SYNC_INIIALSYNC_WALK_FTS_READ ) ) ) {
  1130. switch ( node->fts_info ) {
  1131. // Duplicates:
  1132. case FTS_DP:
  1133. continue;
  1134. // To sync:
  1135. case FTS_DEFAULT:
  1136. case FTS_SL:
  1137. case FTS_SLNONE:
  1138. case FTS_F:
  1139. case FTS_D:
  1140. case FTS_DOT:
  1141. case FTS_DC: // TODO: think about case of FTS_DC
  1142. case FTS_NSOK:
  1143. break;
  1144. // Error cases:
  1145. case FTS_ERR:
  1146. case FTS_NS:
  1147. case FTS_DNR: {
  1148. int fts_errno = node->fts_errno;
  1149. if ( fts_errno == ENOENT ) {
  1150. debug ( 1, "Got error while privileged_fts_read(): %s (errno: %i; fts_info: %i).", strerror ( fts_errno ), fts_errno, node->fts_info );
  1151. continue;
  1152. } else {
  1153. error ( "Got error while privileged_fts_read(): %s (errno: %i; fts_info: %i).", strerror ( fts_errno ), fts_errno, node->fts_info );
  1154. ret = node->fts_errno;
  1155. goto l_sync_initialsync_walk_end;
  1156. }
  1157. }
  1158. default:
  1159. error ( "Got unknown fts_info vlaue while privileged_fts_read(): %i.", node->fts_info );
  1160. ret = EINVAL;
  1161. goto l_sync_initialsync_walk_end;
  1162. }
  1163. path_rel = sync_path_abs2rel ( ctx_p, node->fts_path, -1, &path_rel_len, path_rel );
  1164. debug ( 3, "Pointing to \"%s\" (node->fts_info == %i)", path_rel, node->fts_info );
  1165. if ( ctx_p->flags[EXCLUDEMOUNTPOINTS] && node->fts_info == FTS_D ) {
  1166. if ( rsync_and_prefer_excludes ) {
  1167. if ( node->fts_statp->st_dev != ctx_p->st_dev ) {
  1168. debug ( 3, "Excluding \"%s\" due to location on other device: node->fts_statp->st_dev [0x%o] != ctx_p->st_dev [0x%o]", path_rel, node->fts_statp->st_dev, ctx_p->st_dev );
  1169. if ( queue_id == QUEUE_AUTO ) {
  1170. int i = 0;
  1171. while ( i < QUEUE_MAX )
  1172. indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_CONTENTRECURSIVELY, i++ );
  1173. } else
  1174. indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_CONTENTRECURSIVELY, queue_id );
  1175. fts_set ( tree, node, FTS_SKIP );
  1176. }
  1177. } else if ( !ctx_p->flags[RSYNCPREFERINCLUDE] )
  1178. error ( "Excluding mount points is not implentemted for non \"rsync*\" modes." );
  1179. }
  1180. mode_t st_mode = fts_no_stat ? ( node->fts_info == FTS_D ? S_IFDIR : S_IFREG ) : node->fts_statp->st_mode;
  1181. if ( !skip_rules ) {
  1182. ruleaction_t perm = rules_getperm ( path_rel, st_mode, rules_p, RA_WALK | RA_SYNC );
  1183. if ( ! ( perm & RA_WALK ) ) {
  1184. debug ( 3, "Rejecting to walk into \"%s\".", path_rel );
  1185. fts_set ( tree, node, FTS_SKIP );
  1186. }
  1187. if ( ! ( perm & RA_SYNC ) ) {
  1188. debug ( 3, "Excluding \"%s\".", path_rel );
  1189. if ( rsync_and_prefer_excludes ) {
  1190. if ( queue_id == QUEUE_AUTO ) {
  1191. int i = 0;
  1192. while ( i < QUEUE_MAX )
  1193. indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_NONE, i++ );
  1194. } else
  1195. indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_NONE, queue_id );
  1196. }
  1197. continue;
  1198. }
  1199. }
  1200. if ( !rsync_and_prefer_excludes ) {
  1201. evinfo_initialevmask ( ctx_p, &evinfo, node->fts_info == FTS_D );
  1202. switch ( ctx_p->flags[MODE] ) {
  1203. case MODE_SIMPLE:
  1204. SAFE ( sync_dosync ( node->fts_path, evinfo.evmask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", node->fts_path, evinfo.evmask ); return -1; );
  1205. continue;
  1206. default:
  1207. break;
  1208. }
  1209. evinfo.seqid_min = sync_seqid();
  1210. evinfo.seqid_max = evinfo.seqid_min;
  1211. evinfo.objtype_old = EOT_DOESNTEXIST;
  1212. evinfo.objtype_new = node->fts_info == FTS_D ? EOT_DIR : EOT_FILE;
  1213. evinfo.fsize = fts_no_stat ? 0 : node->fts_statp->st_size;
  1214. debug ( 3, "queueing \"%s\" (depth: %i) with int-flags %p", node->fts_path, node->fts_level, ( void * ) ( unsigned long ) evinfo.flags );
  1215. int _ret = sync_queuesync ( path_rel, &evinfo, ctx_p, indexes_p, queue_id );
  1216. if ( _ret ) {
  1217. error ( "Got error while queueing \"%s\".", node->fts_path );
  1218. ret = errno;
  1219. goto l_sync_initialsync_walk_end;
  1220. }
  1221. continue;
  1222. }
  1223. /* "FTS optimization" */
  1224. if (
  1225. skip_rules &&
  1226. node->fts_info == FTS_D &&
  1227. !ctx_p->flags[EXCLUDEMOUNTPOINTS]
  1228. ) {
  1229. debug ( 4, "\"FTS optimizator\"" );
  1230. fts_set ( tree, node, FTS_SKIP );
  1231. }
  1232. }
  1233. if ( errno ) {
  1234. error ( "Got error while privileged_fts_read() and related routines." );
  1235. ret = errno;
  1236. goto l_sync_initialsync_walk_end;
  1237. }
  1238. if ( privileged_fts_close ( tree, PC_SYNC_INIIALSYNC_WALK_FTS_CLOSE ) ) {
  1239. error ( "Got error while privileged_fts_close()." );
  1240. ret = errno;
  1241. goto l_sync_initialsync_walk_end;
  1242. }
  1243. l_sync_initialsync_walk_end:
  1244. if ( path_rel != NULL )
  1245. free ( path_rel );
  1246. return ret;
  1247. }
  1248. const char *sync_parameter_get ( const char *variable_name, void *_dosync_arg_p )
  1249. {
  1250. struct dosync_arg *dosync_arg_p = _dosync_arg_p;
  1251. ctx_t *ctx_p = dosync_arg_p->ctx_p;
  1252. #ifdef _DEBUG_FORCE
  1253. debug ( 15, "(\"%s\", %p): 0x%x, \"%s\"", variable_name, _dosync_arg_p, ctx_p == NULL ? 0 : ctx_p->synchandler_argf, dosync_arg_p->evmask_str );
  1254. #endif
  1255. if ( ( ctx_p == NULL || ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST_PATH ) ) && !strcmp ( variable_name, "INCLUDE-LIST-PATH" ) )
  1256. return dosync_arg_p->outf_path;
  1257. else if ( ( ctx_p == NULL || ( ctx_p->synchandler_argf & SHFL_EXCLUDE_LIST_PATH ) ) && !strcmp ( variable_name, "EXCLUDE-LIST-PATH" ) )
  1258. return dosync_arg_p->excf_path;
  1259. else if ( !strcmp ( variable_name, "TYPE" ) )
  1260. return dosync_arg_p->list_type_str;
  1261. else if ( !strcmp ( variable_name, "EVENT-MASK" ) )
  1262. return dosync_arg_p->evmask_str;
  1263. errno = ENOENT;
  1264. return NULL;
  1265. }
/**
 * Builds a NULL-terminated argv[] vector for spawning an external sync
 * handler from the argument template in args_p.
 *
 * - argv[0] is always the handler binary path (ctx_p->handlerfpath).
 * - Template arguments already marked "isexpanded" are strdup()'ed verbatim.
 * - The special token "%INCLUDE-LIST%" expands to one argument per entry of
 *   dosync_arg_p->include_list.
 * - Every other argument goes through parameter_expand(), which resolves
 *   %VARIABLE% references via sync_parameter_get().
 *
 * Every slot of the returned vector is heap-allocated; the caller releases
 * it with argv_free(). Ownership of the vector transfers to the caller
 * (or to the thread GC when threaded execution is used — see callers).
 */
static char **sync_customargv ( ctx_t *ctx_p, struct dosync_arg *dosync_arg_p, synchandler_args_t *args_p )
{
	int d, s;	// d -- destination index into argv; s -- source index into args_p->v
	// MAXARGUMENTS + 2: room for argv[0] (handler path) and the trailing NULL
	char **argv = ( char ** ) xcalloc ( sizeof ( char * ), MAXARGUMENTS + 2 );
	s = d = 0;
	argv[d++] = strdup ( ctx_p->handlerfpath );
	while ( s < args_p->c ) {
		char *arg = args_p->v[s];
		char isexpanded = args_p->isexpanded[s];
		s++;
#ifdef _DEBUG_FORCE
		debug ( 30, "\"%s\" [%p]", arg, arg );
#endif
		if ( isexpanded ) {
#ifdef _DEBUG_FORCE
			debug ( 19, "\"%s\" [%p] is already expanded, just strdup()-ing it", arg, arg );
#endif
			argv[d++] = strdup ( arg );
			continue;
		}
		// "%INCLUDE-LIST%" fans out into include_list_count separate arguments
		if ( !strcmp ( arg, "%INCLUDE-LIST%" ) ) {
			int i = 0, e = dosync_arg_p->include_list_count;
			const char **include_list = dosync_arg_p->include_list;
#ifdef _DEBUG_FORCE
			debug ( 19, "INCLUDE-LIST: e == %u; d,s: %u,%u", e, d, s );
#endif
			while ( i < e ) {
#ifdef PARANOID
				if ( d >= MAXARGUMENTS ) {
					errno = E2BIG;
					critical ( "Too many arguments" );
				}
#endif
				argv[d++] = parameter_expand ( ctx_p, strdup ( include_list[i++] ), PEF_NONE, NULL, NULL, sync_parameter_get, dosync_arg_p );
#ifdef _DEBUG_FORCE
				debug ( 19, "include-list: argv[%u] == %p", d - 1, argv[d - 1] );
#endif
			}
			continue;
		}
#ifdef PARANOID
		if ( d >= MAXARGUMENTS ) {
			errno = E2BIG;
			critical ( "Too many arguments" );
		}
#endif
		// Ordinary template argument: expand %VARIABLE% references
		argv[d] = parameter_expand ( ctx_p, strdup ( arg ), PEF_NONE, NULL, NULL, sync_parameter_get, dosync_arg_p );
#ifdef _DEBUG_FORCE
		debug ( 19, "argv[%u] == %p \"%s\"", d, argv[d], argv[d] );
#endif
		d++;
	}
	argv[d] = NULL;	// exec()-style terminator
#ifdef _DEBUG_FORCE
	debug ( 18, "return %p", argv );
#endif
	return argv;
}
  1324. static void argv_free ( char **argv )
  1325. {
  1326. char **argv_p;
  1327. #ifdef _DEBUG_FORCE
  1328. debug ( 18, "(%p)", argv );
  1329. #endif
  1330. #ifdef VERYPARANOID
  1331. if ( argv == NULL )
  1332. critical ( MSG_SECURITY_PROBLEM ( "argv_free(NULL)" ) );
  1333. #endif
  1334. argv_p = argv;
  1335. while ( *argv_p != NULL ) {
  1336. #ifdef _DEBUG_FORCE
  1337. debug ( 25, "free(%p)", *argv_p );
  1338. #endif
  1339. free ( * ( argv_p++ ) );
  1340. }
  1341. free ( argv );
  1342. return;
  1343. }
  1344. static inline int sync_initialsync_finish ( ctx_t *ctx_p, initsync_t initsync, int ret )
  1345. {
  1346. ( void ) initsync;
  1347. finish_iteration ( ctx_p );
  1348. return ret;
  1349. }
  1350. int sync_initialsync ( const char *path, ctx_t *ctx_p, indexes_t *indexes_p, initsync_t initsync )
  1351. {
  1352. int ret;
  1353. queue_id_t queue_id;
  1354. debug ( 3, "(\"%s\", ctx_p, indexes_p, %i)", path, initsync );
  1355. #ifdef CLUSTER_SUPPORT
  1356. if ( initsync == INITSYNC_FULL ) {
  1357. if ( ctx_p->cluster_iface )
  1358. return cluster_initialsync();
  1359. }
  1360. #endif
  1361. if ( initsync == INITSYNC_FULL )
  1362. queue_id = QUEUE_INSTANT;
  1363. else
  1364. queue_id = QUEUE_NORMAL;
  1365. // non-RSYNC case:
  1366. if (
  1367. ! (
  1368. ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
  1369. ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
  1370. ( ctx_p->flags[MODE] == MODE_RSYNCSO )
  1371. )
  1372. ) {
  1373. debug ( 3, "syncing \"%s\"", path );
  1374. if ( ctx_p->flags[HAVERECURSIVESYNC] ) {
  1375. if ( ctx_p->flags[MODE] == MODE_SO ) {
  1376. api_eventinfo_t *ei = ( api_eventinfo_t * ) xmalloc ( sizeof ( *ei ) );
  1377. #ifdef PARANIOD
  1378. memset ( ei, 0, sizeof ( *ei ) );
  1379. #endif
  1380. api_evinfo_initialevmask ( ctx_p, ei, 1 );
  1381. ei->flags = EVIF_RECURSIVELY;
  1382. ei->path_len = strlen ( path );
  1383. ei->path = strdup ( path );
  1384. ei->objtype_old = EOT_DOESNTEXIST;
  1385. ei->objtype_new = EOT_DIR;
  1386. ret = so_call_sync ( ctx_p, indexes_p, 1, ei );
  1387. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1388. } else {
  1389. struct dosync_arg dosync_arg;
  1390. synchandler_args_t *args_p;
  1391. args_p = ctx_p->synchandler_args[SHARGS_INITIAL].c ?
  1392. &ctx_p->synchandler_args[SHARGS_INITIAL] :
  1393. &ctx_p->synchandler_args[SHARGS_PRIMARY];
  1394. dosync_arg.ctx_p = ctx_p;
  1395. *dosync_arg.include_list = path;
  1396. dosync_arg.include_list_count = 1;
  1397. dosync_arg.list_type_str = "initialsync";
  1398. char **argv = sync_customargv ( ctx_p, &dosync_arg, args_p );
  1399. ret = SYNC_EXEC_ARGV (
  1400. ctx_p,
  1401. indexes_p,
  1402. NULL,
  1403. NULL,
  1404. argv );
  1405. #ifdef THREADING_SUPPORT
  1406. if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
  1407. #endif
  1408. argv_free ( argv );
  1409. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1410. }
  1411. }
  1412. #ifdef DOXYGEN
  1413. sync_exec_argv ( NULL, NULL );
  1414. sync_exec_argv_thread ( NULL, NULL );
  1415. #endif
  1416. ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );
  1417. if ( ret )
  1418. error ( "Cannot get synclist" );
  1419. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1420. }
  1421. // RSYNC case:
  1422. if ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) {
  1423. queueinfo_t *queueinfo = &ctx_p->_queues[queue_id];
  1424. if ( !queueinfo->stime )
  1425. queueinfo->stime = time ( NULL ); // Useful for debugging
  1426. eventinfo_t *evinfo = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo ) );
  1427. memset ( evinfo, 0, sizeof ( *evinfo ) );
  1428. evinfo->flags |= EVIF_RECURSIVELY;
  1429. evinfo->seqid_min = sync_seqid();
  1430. evinfo->seqid_max = evinfo->seqid_min;
  1431. evinfo->objtype_old = EOT_DOESNTEXIST;
  1432. evinfo->objtype_new = EOT_DIR;
  1433. // Searching for excludes
  1434. ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );
  1435. if ( ret ) {
  1436. error ( "Cannot get exclude what to exclude" );
  1437. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1438. }
  1439. debug ( 3, "queueing \"%s\" with int-flags %p", path, ( void * ) ( unsigned long ) evinfo->flags );
  1440. char *path_rel = sync_path_abs2rel ( ctx_p, path, -1, NULL, NULL );
  1441. ret = indexes_queueevent ( indexes_p, path_rel, evinfo, queue_id );
  1442. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1443. }
  1444. // Searching for includes
  1445. ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );
  1446. return sync_initialsync_finish ( ctx_p, initsync, ret );
  1447. }
  1448. int sync_notify_mark ( ctx_t *ctx_p, const char *accpath, const char *path, size_t pathlen, indexes_t *indexes_p )
  1449. {
  1450. debug ( 3, "(..., \"%s\", %i,...)", path, pathlen );
  1451. int wd = indexes_fpath2wd ( indexes_p, path );
  1452. if ( wd != -1 ) {
  1453. debug ( 1, "\"%s\" is already marked (wd: %i). Skipping.", path, wd );
  1454. return wd;
  1455. }
  1456. debug ( 5, "ctx_p->notifyenginefunct.add_watch_dir(ctx_p, indexes_p, \"%s\")", accpath );
  1457. if ( ( wd = ctx_p->notifyenginefunct.add_watch_dir ( ctx_p, indexes_p, accpath ) ) == -1 ) {
  1458. if ( errno == ENOENT )
  1459. return -2;
  1460. error ( "Cannot ctx_p->notifyenginefunct.add_watch_dir() on \"%s\".",
  1461. path );
  1462. return -1;
  1463. }
  1464. debug ( 6, "endof ctx_p->notifyenginefunct.add_watch_dir(ctx_p, indexes_p, \"%s\")", accpath );
  1465. indexes_add_wd ( indexes_p, wd, path, pathlen );
  1466. return wd;
  1467. }
  1468. #ifdef CLUSTER_SUPPORT
  1469. static inline int sync_mark_walk_cluster_modtime_update ( ctx_t *ctx_p, const char *path, short int dirlevel, mode_t st_mode )
  1470. {
  1471. if ( ctx_p->cluster_iface ) {
  1472. int ret = cluster_modtime_update ( path, dirlevel, st_mode );
  1473. if ( ret ) error ( "cannot cluster_modtime_update()" );
  1474. return ret;
  1475. }
  1476. return 0;
  1477. }
  1478. #endif
  1479. int sync_mark_walk ( ctx_t *ctx_p, const char *dirpath, indexes_t *indexes_p )
  1480. {
  1481. int ret = 0;
  1482. const char *rootpaths[] = {dirpath, NULL};
  1483. FTS *tree;
  1484. rule_t *rules_p = ctx_p->rules;
  1485. debug ( 2, "(ctx_p, \"%s\", indexes_p).", dirpath );
  1486. int fts_opts = FTS_NOCHDIR | FTS_PHYSICAL | FTS_NOSTAT | ( ctx_p->flags[ONEFILESYSTEM] ? FTS_XDEV : 0 );
  1487. debug ( 3, "fts_opts == %p", ( void * ) ( long ) fts_opts );
  1488. tree = privileged_fts_open ( ( char *const * ) rootpaths, fts_opts, NULL, PC_SYNC_MARK_WALK_FTS_OPEN );
  1489. if ( tree == NULL ) {
  1490. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Cannot privileged_fts_open() on \"%s\".", dirpath );
  1491. return errno;
  1492. }
  1493. FTSENT *node;
  1494. char *path_rel = NULL;
  1495. size_t path_rel_len = 0;
  1496. #ifdef VERYPARANOID
  1497. errno = 0;
  1498. #endif
  1499. while ( ( node = privileged_fts_read ( tree, PC_SYNC_MARK_WALK_FTS_READ ) ) ) {
  1500. #ifdef CLUSTER_SUPPORT
  1501. int ret;
  1502. #endif
  1503. debug ( 2, "walking: \"%s\" (depth %u): fts_info == %i", node->fts_path, node->fts_level, node->fts_info );
  1504. switch ( node->fts_info ) {
  1505. // Duplicates:
  1506. case FTS_DP:
  1507. continue;
  1508. // Files:
  1509. case FTS_DEFAULT:
  1510. case FTS_SL:
  1511. case FTS_SLNONE:
  1512. case FTS_F:
  1513. case FTS_NSOK:
  1514. #ifdef CLUSTER_SUPPORT
  1515. if ( ( ret = sync_mark_walk_cluster_modtime_update ( ctx_p, node->fts_path, node->fts_level, S_IFREG ) ) )
  1516. goto l_sync_mark_walk_end;
  1517. #endif
  1518. continue;
  1519. // Directories (to mark):
  1520. case FTS_D:
  1521. case FTS_DC: // TODO: think about case of FTS_DC
  1522. case FTS_DOT:
  1523. #ifdef CLUSTER_SUPPORT
  1524. if ( ( ret = sync_mark_walk_cluster_modtime_update ( ctx_p, node->fts_path, node->fts_level, S_IFDIR ) ) )
  1525. goto l_sync_mark_walk_end;
  1526. #endif
  1527. break;
  1528. // Error cases:
  1529. case FTS_ERR:
  1530. case FTS_NS:
  1531. case FTS_DNR:
  1532. if ( errno == ENOENT ) {
  1533. debug ( 1, "Got error while privileged_fts_read(); fts_info: %i.", node->fts_info );
  1534. continue;
  1535. } else {
  1536. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_read(); fts_info: %i.", node->fts_info );
  1537. ret = errno;
  1538. goto l_sync_mark_walk_end;
  1539. }
  1540. default:
  1541. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got unknown fts_info vlaue while privileged_fts_read(): %i.", node->fts_info );
  1542. ret = EINVAL;
  1543. goto l_sync_mark_walk_end;
  1544. }
  1545. path_rel = sync_path_abs2rel ( ctx_p, node->fts_path, -1, &path_rel_len, path_rel );
  1546. ruleaction_t perm = rules_search_getperm ( path_rel, S_IFDIR, rules_p, RA_WALK, NULL );
  1547. debug ( 3, "perm == 0x%o", perm );
  1548. if ( ! ( perm & RA_WALK ) ) {
  1549. debug ( 2, "setting an FTS_SKIP on the directory" );
  1550. if ( fts_set ( tree, node, FTS_SKIP ) )
  1551. warning ( "Got error while fts_set(tree, node, FTS_SKIP): %s", path_rel );
  1552. }
  1553. if ( ! ( perm & RA_MONITOR ) ) {
  1554. debug ( 2, "don't mark the directory" );
  1555. continue;
  1556. }
  1557. debug ( 2, "marking \"%s\" (depth %u)", node->fts_path, node->fts_level );
  1558. int wd = sync_notify_mark ( ctx_p, node->fts_accpath, node->fts_path, node->fts_pathlen, indexes_p );
  1559. if ( wd == -1 ) {
  1560. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while notify-marking \"%s\".", node->fts_path );
  1561. ret = errno;
  1562. goto l_sync_mark_walk_end;
  1563. }
  1564. debug ( 2, "watching descriptor is %i.", wd );
  1565. }
  1566. if ( errno ) {
  1567. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_read() and related routines." );
  1568. ret = errno;
  1569. goto l_sync_mark_walk_end;
  1570. }
  1571. if ( privileged_fts_close ( tree, PC_SYNC_MARK_WALK_FTS_CLOSE ) ) {
  1572. error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_close()." );
  1573. ret = errno;
  1574. goto l_sync_mark_walk_end;
  1575. }
  1576. l_sync_mark_walk_end:
  1577. if ( path_rel != NULL )
  1578. free ( path_rel );
  1579. return ret;
  1580. }
/**
 * Initializes the filesystem-notification engine selected by
 * ctx_p->flags[MONITOR]. Each supported engine is compiled in behind its
 * own feature macro; the engine handle (where one exists) is stored in
 * ctx_p->fsmondata.
 *
 * Returns 0 on success, -1 on failure (errno set to EINVAL for an
 * unknown/unsupported engine value).
 */
int sync_notify_init ( ctx_t *ctx_p )
{
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef FANOTIFY_SUPPORT
		case NE_FANOTIFY: {
			// fanotify: the fd is stashed directly in fsmondata as an integer
			ctx_p->fsmondata = ( long ) fanotify_init ( FANOTIFY_FLAGS, FANOTIFY_EVFLAGS );
			if ( ( long ) ctx_p->fsmondata == -1 ) {
				error ( "cannot fanotify_init(%i, %i).", FANOTIFY_FLAGS, FANOTIFY_EVFLAGS );
				return -1;
			}
			return 0;
		}
#endif
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY: {
# ifdef INOTIFY_OLD
			// Pre-inotify_init1() systems: flags cannot be passed at creation
			ctx_p->fsmondata = ( void * ) ( long ) inotify_init();
#  if INOTIFY_FLAGS != 0
#   warning Do not know how to set inotify flags (too old system)
#  endif
# else
			ctx_p->fsmondata = ( void * ) ( long ) inotify_init1 ( INOTIFY_FLAGS );
# endif
			if ( ( long ) ctx_p->fsmondata == -1 ) {
				error ( "cannot inotify_init1(%i).", INOTIFY_FLAGS );
				return -1;
			}
			return 0;
		}
#endif
#ifdef KQUEUE_SUPPORT
		case NE_KQUEUE: {
			// kqueue_init() records its own state in ctx_p; the descriptor
			// here is only checked for failure
			int kqueue_d = kqueue_init ( ctx_p );
			if ( kqueue_d == -1 ) {
				error ( "cannot kqueue_init(ctx_p)." );
				return -1;
			}
			return 0;
		}
#endif
#ifdef BSM_SUPPORT
		case NE_BSM:
		case NE_BSM_PREFETCH: {
			int bsm_d = bsm_init ( ctx_p );
			if ( bsm_d == -1 ) {
				error ( "cannot bsm_init(ctx_p)." );
				return -1;
			}
			return 0;
		}
#endif
#ifdef GIO_SUPPORT
		case NE_GIO: {
			// GIO init failure is unrecoverable: abort via critical_on()
			critical_on ( gio_init ( ctx_p ) == -1 );
			return 0;
		}
#endif
	}
	error ( "unknown notify-engine: %i", ctx_p->flags[MONITOR] );
	errno = EINVAL;
	return -1;
}
/**
 * Builds the argv for the primary sync handler and executes it for a single
 * file "fpath", passing the event mask (as a decimal string) through the
 * %EVENT-MASK% template variable.
 *
 * Returns the handler's result code.
 */
static inline int sync_dosync_exec ( ctx_t *ctx_p, indexes_t *indexes_p, const char *evmask_str, const char *fpath )
{
	int rc;
	struct dosync_arg dosync_arg;
	debug ( 20, "(ctx_p, indexes_p, \"%s\", \"%s\")", evmask_str, fpath );
	dosync_arg.ctx_p = ctx_p;
	// A one-entry include list containing just this file
	*dosync_arg.include_list = fpath;
	dosync_arg.include_list_count = 1;
	dosync_arg.list_type_str = "sync";
	dosync_arg.evmask_str = evmask_str;
	char **argv = sync_customargv ( ctx_p, &dosync_arg, &ctx_p->synchandler_args[SHARGS_PRIMARY] );
	rc = SYNC_EXEC_ARGV (
	         ctx_p,
	         indexes_p,
	         NULL, NULL,
	         argv );
	// NOTE: when THREADING_SUPPORT is defined, the "if" below conditionally
	// guards the argv_free() call; otherwise argv_free() runs unconditionally.
#ifdef THREADING_SUPPORT
	if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
#endif
		argv_free ( argv );
	return rc;
	// Unreachable: present only so doxygen generates cross-references to the
	// functions hidden behind the SYNC_EXEC_ARGV macro.
#ifdef DOXYGEN
	sync_exec_argv ( NULL, NULL );
	sync_exec_argv_thread ( NULL, NULL );
#endif
}
  1669. int sync_dosync ( const char *fpath, uint32_t evmask, ctx_t *ctx_p, indexes_t *indexes_p )
  1670. {
  1671. int ret;
  1672. #ifdef CLUSTER_SUPPORT
  1673. ret = cluster_lock ( fpath );
  1674. if ( ret ) return ret;
  1675. #endif
  1676. char *evmask_str = xmalloc ( 1 << 8 );
  1677. sprintf ( evmask_str, "%u", evmask );
  1678. ret = sync_dosync_exec ( ctx_p, indexes_p, evmask_str, fpath );
  1679. free ( evmask_str );
  1680. #ifdef CLUSTER_SUPPORT
  1681. ret = cluster_unlock_all();
  1682. #endif
  1683. return ret;
  1684. }
  1685. int fileischanged ( ctx_t *ctx_p, indexes_t *indexes_p, const char *path_rel, stat64_t *lst_p, int is_deleted )
  1686. {
  1687. if ( lst_p == NULL || !ctx_p->flags[MODSIGN] )
  1688. return 1;
  1689. debug ( 9, "Checking modification signature" );
  1690. fileinfo_t *finfo = indexes_fileinfo ( indexes_p, path_rel );
  1691. if ( finfo != NULL ) {
  1692. uint32_t diff;
  1693. if ( ! ( diff = stat_diff ( &finfo->lst, lst_p ) & ctx_p->flags[MODSIGN] ) ) {
  1694. debug ( 8, "Modification signature: File not changed: \"%s\"", path_rel );
  1695. return 0; // Skip file syncing if it's metadata not changed enough (according to "--modification-signature" setting)
  1696. }
  1697. debug ( 8, "Modification signature: stat_diff == 0x%o; significant diff == 0x%o (ctx_p->flags[MODSIGN] == 0x%o)", diff, diff & ctx_p->flags[MODSIGN], ctx_p->flags[MODSIGN] );
  1698. if ( is_deleted ) {
  1699. debug ( 8, "Modification signature: Deleting information about \"%s\"", path_rel );
  1700. indexes_fileinfo_add ( indexes_p, path_rel, NULL );
  1701. free ( finfo );
  1702. } else {
  1703. debug ( 8, "Modification signature: Updating information about \"%s\"", path_rel );
  1704. memcpy ( &finfo->lst, lst_p, sizeof ( finfo->lst ) );
  1705. }
  1706. } else {
  1707. debug ( 8, "There's no information about this file/dir: \"%s\". Just remembering the current state.", path_rel );
  1708. // Adding file/dir information
  1709. finfo = xmalloc ( sizeof ( *finfo ) );
  1710. memcpy ( &finfo->lst, lst_p, sizeof ( finfo->lst ) );
  1711. indexes_fileinfo_add ( indexes_p, path_rel, finfo );
  1712. }
  1713. return 1;
  1714. }
  1715. static inline int sync_indexes_fpath2ei_addfixed ( ctx_t *ctx_p, indexes_t *indexes_p, const char *fpath, eventinfo_t *evinfo )
  1716. {
  1717. static const char fpath_dot[] = ".";
  1718. const char *fpath_fixed;
  1719. fpath_fixed = fpath;
  1720. switch ( ctx_p->flags[MODE] ) {
  1721. case MODE_DIRECT:
  1722. // If fpath is empty (that means CWD) then assign it to "."
  1723. if ( !*fpath )
  1724. fpath_fixed = fpath_dot;
  1725. break;
  1726. default:
  1727. break;
  1728. }
  1729. return indexes_fpath2ei_add ( indexes_p, strdup ( fpath_fixed ), evinfo );
  1730. }
/**
 * Filters one filesystem event through the rules and loads it into the
 * local prequeue (the fpath2ei index), merging with any event already
 * collected for the same path.
 *
 * Either path_full or path_rel may be NULL — the missing one is derived
 * via the reusable buffer (*path_buf_p / *path_buf_len_p). A newly created
 * directory is (re)marked for monitoring and initial-synced; in MODE_SIMPLE
 * the event is executed immediately instead of being queued.
 *
 * Returns 0 on success (including "filtered out, nothing to do"),
 * an error code otherwise.
 */
int sync_prequeue_loadmark
(
    int monitored,		// Is an event from event monitor handler (1 -- it is; 0 -- it doesn't)
    ctx_t *ctx_p,
    indexes_t *indexes_p,
    const char *path_full,
    const char *path_rel,
    stat64_t *lst_p,
    eventobjtype_t objtype_old,
    eventobjtype_t objtype_new,
    uint32_t event_mask,
    int event_wd,
    mode_t st_mode,
    off_t st_size,
    char **path_buf_p,
    size_t *path_buf_len_p,
    eventinfo_t *evinfo
)
{
	debug ( 10, "%i %p %p %p %p %p %i %i 0x%o %i %i %i %p %p %p",
	        monitored,
	        ctx_p,
	        indexes_p,
	        path_full,
	        path_rel,
	        lst_p,
	        objtype_old,
	        objtype_new,
	        event_mask,
	        event_wd,
	        st_mode,
	        st_size,
	        path_buf_p,
	        path_buf_len_p,
	        evinfo
	      );
#ifdef PARANOID
	// path_buf_p/path_buf_len_p are reusable buffers so path_rel/path_full
	// don't get reallocated on every event; if both paths are missing we
	// need the buffer to derive one from the other.
	if ( ( path_buf_p == NULL || path_buf_len_p == NULL ) && ( path_full == NULL || path_rel == NULL ) ) {
		error ( "path_rel_p == NULL || path_rel_len_p == NULL" );
		return EINVAL;
	}
#endif
#ifdef VERYPARANOID
	if ( path_full == NULL && path_rel == NULL ) {
		error ( "path_full == NULL && path_rel == NULL" );
		return EINVAL;
	}
#endif
	// Derive the relative path if only the absolute one was supplied
	if ( path_rel == NULL ) {
		*path_buf_p = sync_path_abs2rel ( ctx_p, path_full, -1, path_buf_len_p, *path_buf_p );
		path_rel = *path_buf_p;
	}
	ruleaction_t perm = RA_ALL;
	if ( st_mode ) {
		// Checking by filter rules
		perm = rules_getperm ( path_rel, st_mode, ctx_p->rules, RA_WALK | RA_MONITOR | RA_SYNC );
		if ( ! ( perm & ( RA_MONITOR | RA_WALK | RA_SYNC ) ) ) {
			// Completely filtered out: nothing to walk, monitor or sync
			return 0;
		}
	}
	// Handling different cases
	int is_dir = objtype_old == EOT_DIR || objtype_new == EOT_DIR;
	int is_created = objtype_old == EOT_DOESNTEXIST;
	int is_deleted = objtype_new == EOT_DOESNTEXIST;
	debug ( 4, "is_dir == %x; is_created == %x; is_deleted == %x", is_dir, is_created, is_deleted );
	if ( is_dir ) {
		if ( is_created ) {
			int ret;
			if ( perm & ( RA_WALK | RA_MONITOR ) ) {
				// A new directory: derive the absolute path if needed,
				// put it under watch, and initial-sync its content
				if ( path_full == NULL ) {
					*path_buf_p = sync_path_rel2abs ( ctx_p, path_rel, -1, path_buf_len_p, *path_buf_p );
					path_full = *path_buf_p;
				}
				if ( ( perm & RA_MONITOR ) && monitored ) {
					ret = sync_mark_walk ( ctx_p, path_full, indexes_p );
					if ( ret ) {
						// Likely a create+delete race; not an error
						debug ( 1, "Seems, that directory \"%s\" disappeared, while trying to mark it.", path_full );
						return 0;
					}
				}
				if ( perm & RA_WALK ) {
					ret = sync_initialsync ( path_full, ctx_p, indexes_p, INITSYNC_SUBDIR );
					if ( ret ) {
						errno = ret;
						error ( "Got error from sync_initialsync()" );
						return errno;
					}
				}
			}
			fileischanged ( ctx_p, indexes_p, path_rel, lst_p, is_deleted ); // Just to remember it's state
			return 0;
		} else if ( is_deleted ) {
			debug ( 2, "Disappeared \".../%s\".", path_rel );
		}
	}
	if ( ! ( perm & RA_SYNC ) ) {
		return 0;
	}
	// Modification-signature check: skip events that didn't really change the file
	if ( !fileischanged ( ctx_p, indexes_p, path_rel, lst_p, is_deleted ) ) {
		debug ( 4, "The file/dir is not changed. Returning." );
		return 0;
	}
	switch ( ctx_p->flags[MODE] ) {
		case MODE_SIMPLE:
			// MODE_SIMPLE executes the handler right away instead of queueing
			if ( path_full == NULL ) {
				*path_buf_p = sync_path_rel2abs ( ctx_p, path_rel, -1, path_buf_len_p, *path_buf_p );
				path_full = *path_buf_p;
			}
			return SAFE ( sync_dosync ( path_full, event_mask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", path_full, event_mask ); return -1; );
		default:
			break;
	}
	// Locally queueing the event
	int isnew = 0;
	if ( evinfo == NULL )
		evinfo = indexes_fpath2ei ( indexes_p, path_rel );
	else
		isnew++; // It's new for prequeue (but old for lockwait queue)
	if ( evinfo == NULL ) {
		// First event for this path: allocate a fresh record
		evinfo = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo ) );
		memset ( evinfo, 0, sizeof ( *evinfo ) );
		evinfo->fsize = st_size;
		evinfo->wd = event_wd;
		evinfo->seqid_min = sync_seqid();
		evinfo->seqid_max = evinfo->seqid_min;
		evinfo->objtype_old = objtype_old;
		isnew++;
		debug ( 3, "new event: fsize == %i; wd == %i", evinfo->fsize, evinfo->wd );
	} else {
		// Merging into an existing record: only extend the sequence range
		evinfo->seqid_max = sync_seqid();
	}
	// Event-mask merge semantics depend on the monitor: inotify/kqueue masks
	// are OR-ed (accumulative), BSM/GIO masks replace the previous value.
	// NOTE(review): "#if KQUEUE_SUPPORT | INOTIFY_SUPPORT" relies on these
	// macros expanding to numeric values when defined — confirm build flags.
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef KQUEUE_SUPPORT
		case NE_KQUEUE:
#endif
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY:
#endif
#if KQUEUE_SUPPORT | INOTIFY_SUPPORT
			evinfo->evmask |= event_mask;
			break;
#endif
#ifdef BSM_SUPPORT
		case NE_BSM:
		case NE_BSM_PREFETCH:
			evinfo->evmask = event_mask;
			break;
#endif
#ifdef GIO_SUPPORT
		case NE_GIO:
			evinfo->evmask = event_mask;
			break;
#endif
	}
	evinfo->objtype_new = objtype_new;
	debug ( 2, "path_rel == \"%s\"; evinfo->objtype_old == %i; evinfo->objtype_new == %i; "
	        "evinfo->seqid_min == %u; evinfo->seqid_max == %u",
	        path_rel, evinfo->objtype_old, evinfo->objtype_new,
	        evinfo->seqid_min, evinfo->seqid_max
	      );
	if ( isnew )
		// Fix the path (if required) and call indexes_fpath2ei_add() to remeber the new object to be synced
		sync_indexes_fpath2ei_addfixed ( ctx_p, indexes_p, path_rel, evinfo );
	return 0;
}
  1897. void _sync_idle_dosync_collectedexcludes ( gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp )
  1898. {
  1899. char *fpath = ( char * ) fpath_gp;
  1900. indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
  1901. debug ( 3, "\"%s\", %u (%p).", fpath, GPOINTER_TO_INT ( flags_gp ), flags_gp );
  1902. indexes_addexclude_aggr ( indexes_p, strdup ( fpath ), ( eventinfo_flags_t ) GPOINTER_TO_INT ( flags_gp ) );
  1903. return;
  1904. }
// Is not a thread-safe function! (it builds prefixes in a static buffer)
/**
 * Checks whether fpath is covered by the hash table "ht": first by an exact
 * key match, then via the root entry "" and each parent-directory prefix
 * that carries EVIF_RECURSIVELY.
 *
 * NOTE(review): the final "return evinfo" returns the result of the LAST
 * prefix lookup even when that entry lacked EVIF_RECURSIVELY — confirm this
 * is intended rather than returning NULL there.
 * NOTE(review): memcpy() below assumes strlen(fpath) <= PATH_MAX + 1; an
 * over-long path would overflow the static buffer — verify callers.
 */
eventinfo_t *ht_fpath_isincluded ( GHashTable *ht, const char *const fpath )
{
	static char buf[PATH_MAX + 2] = {0};
	char *ptr, *end;
	// Exact match first
	eventinfo_t *evinfo = g_hash_table_lookup ( ht, fpath );
	debug ( 5, "looking up for \"%s\": %p", fpath, evinfo );
	if ( evinfo != NULL )
		return evinfo;
	if ( !*fpath )
		return NULL;
	// The root entry "" covers everything when marked recursive
	evinfo = g_hash_table_lookup ( ht, "" );
	if ( evinfo != NULL ) {
		debug ( 5, "recursive looking up for \"\": %p (%x: recusively: %x)", evinfo, evinfo->flags, evinfo->flags & EVIF_RECURSIVELY );
		if ( evinfo->flags & EVIF_RECURSIVELY )
			return evinfo;
	}
	// Probe every parent directory prefix by temporarily truncating at each '/'
	size_t fpath_len = strlen ( fpath );
	memcpy ( buf, fpath, fpath_len + 1 );
	ptr = buf;
	end = &buf[fpath_len];
	while ( ptr < end ) {
		if ( *ptr == '/' ) {
			*ptr = 0;
			evinfo = g_hash_table_lookup ( ht, buf );
			if ( evinfo != NULL ) {
				debug ( 5, "recursive looking up for \"%s\": %p (%x: recusively: %x)", buf, evinfo, evinfo->flags, evinfo->flags & EVIF_RECURSIVELY );
				*ptr = '/';
				if ( evinfo->flags & EVIF_RECURSIVELY )
					return evinfo;
			}
		}
		ptr++;
	}
	return evinfo;
}
  1941. int _sync_islocked ( threadinfo_t *threadinfo_p, void *_fpath )
  1942. {
  1943. char *fpath = _fpath;
  1944. eventinfo_t *evinfo = ht_fpath_isincluded ( threadinfo_p->fpath2ei_ht, fpath );
  1945. debug ( 4, "scanning thread %p: fpath<%s> -> evinfo<%p>", threadinfo_p->pthread, fpath, evinfo );
  1946. if ( evinfo != NULL )
  1947. return 1;
  1948. return 0;
  1949. }
// Returns non-zero if fpath is already being handled by any running sync
// thread (scanned via _sync_islocked()). Without threading support there is
// nothing to collide with, so it is always 0.
static inline int sync_islocked ( const char *const fpath )
{
#ifdef THREADING_SUPPORT
	int rc = threads_foreach ( _sync_islocked, STATE_RUNNING, ( void * ) fpath );
	debug ( 3, "<%s>: %u", fpath, rc );
	return rc;
#else
	return 0;
#endif
}
// GHFunc: processes one collected (fpath, evinfo) pair while a queue is being
// flushed. Depending on configuration it either defers the path to the
// lock-wait queue, syncs it immediately, or merges it into the fpath2ei index
// for a later list-based sync (absorbing duplicates from the other queues).
void _sync_idle_dosync_collectedevents ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
{
	char *fpath = ( char * ) fpath_gp;
	eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
	int *evcount_p = & ( ( struct dosync_arg * ) arg_gp )->evcount;
	ctx_t *ctx_p = ( ( struct dosync_arg * ) arg_gp )->ctx_p;
	indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
	queue_id_t queue_id = ( queue_id_t ) ( ( struct dosync_arg * ) arg_gp )->data;	// the queue being flushed
	debug ( 3, "queue_id == %i.", queue_id );
	// Safe-threading mode: a path already owned by a running sync thread must
	// wait — re-queue a copy of the event into QUEUE_LOCKWAIT and stop here.
	if ( ctx_p->flags[THREADING] == PM_SAFE )
		if ( sync_islocked ( fpath ) ) {
			debug ( 3, "\"%s\" is locked, dropping to waitlock queue", fpath );
			eventinfo_t *evinfo_dup = xmalloc ( sizeof ( *evinfo_dup ) );
			memcpy ( evinfo_dup, evinfo, sizeof ( *evinfo ) );
			sync_queuesync ( fpath, evinfo_dup, ctx_p, indexes_p, QUEUE_LOCKWAIT );
			return;
		}
	// No list-file machinery in use at all: sync this single path right away.
	if ( ( ctx_p->listoutdir == NULL ) && ( ! ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST ) ) && ( ! ( ctx_p->flags[MODE] == MODE_SO ) ) ) {
		debug ( 3, "calling sync_dosync()" );
		SAFE ( sync_dosync ( fpath, evinfo->evmask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", fpath, evinfo->evmask ); exit ( errno ? errno : -1 ) ); // TODO: remove exit() from here
		return;
	}
	// Merge the event into the fpath2ei index, creating the entry if this is
	// the first event seen for the path.
	int isnew = 0;
	eventinfo_t *evinfo_idx = indexes_fpath2ei ( indexes_p, fpath );
	if ( evinfo_idx == NULL ) {
		evinfo_idx = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo_idx ) );
		memset ( evinfo_idx, 0, sizeof ( *evinfo_idx ) );
		isnew++;
		( *evcount_p )++;
		evinfo_idx->evmask = evinfo->evmask;
		evinfo_idx->flags = evinfo->flags;
		evinfo_idx->objtype_old = evinfo->objtype_old;
		evinfo_idx->objtype_new = evinfo->objtype_new;
		evinfo_idx->seqid_min = evinfo->seqid_min;
		evinfo_idx->seqid_max = evinfo->seqid_max;
	} else
		evinfo_merge ( ctx_p, evinfo_idx, evinfo );
	// Absorb pending events for the same path from all OTHER queues so the
	// path is synced once with the union of its events.
	queue_id_t _queue_id = 0;
	while ( _queue_id < QUEUE_MAX ) {
		if ( _queue_id == queue_id ) {
			_queue_id++;
			continue;
		}
		eventinfo_t *evinfo_q = indexes_lookupinqueue ( indexes_p, fpath, _queue_id );
		if ( evinfo_q != NULL ) {
			evinfo_merge ( ctx_p, evinfo_idx, evinfo_q );
			indexes_removefromqueue ( indexes_p, fpath, _queue_id );
			if ( !indexes_queuelen ( indexes_p, _queue_id ) )
				ctx_p->_queues[_queue_id].stime = 0;	// queue drained: clear its start time
		}
		_queue_id++;
	}
	if ( isnew ) {
		debug ( 4, "Collecting \"%s\"", fpath );
		// Fix the path (if required) and call indexes_fpath2ei_add() to remeber the new object to be synced
		sync_indexes_fpath2ei_addfixed ( ctx_p, indexes_p, fpath, evinfo_idx );
	} else
		// NOTE(review): fpath appears to be a heap-owned key that is freed
		// when not handed over to the index above — confirm ownership model.
		free ( fpath );
	return;
}
// Scratch state shared across sync_trylocked() invocations: a reusable
// buffer for the full path, passed by reference into sync_prequeue_loadmark()
// (which presumably (re)allocates it on demand — see that function).
struct trylocked_arg {
	char *path_full;	// full-path buffer; NULL until first use
	size_t path_full_len;	// allocated length of path_full
};
// GHRFunc for the QUEUE_LOCKWAIT table: if the path is no longer locked by a
// running sync thread, push it back into the pre-queue and remove it from the
// lock-wait table (return TRUE); otherwise keep it queued (return FALSE).
gboolean sync_trylocked ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
{
	char *fpath = ( char * ) fpath_gp;
	eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
	struct dosync_arg *arg_p = ( struct dosync_arg * ) arg_gp;
	ctx_t *ctx_p = arg_p->ctx_p;
	indexes_t *indexes_p = arg_p->indexes_p;
	struct trylocked_arg *data = arg_p->data;	// shared path buffer (struct trylocked_arg)
	if ( !sync_islocked ( fpath ) ) {
		// Re-inject the original event data into the pre-queue
		if ( sync_prequeue_loadmark ( 0, ctx_p, indexes_p, NULL, fpath, NULL,
		                              evinfo->objtype_old,
		                              evinfo->objtype_new,
		                              evinfo->evmask,
		                              0, 0, 0, &data->path_full, &data->path_full_len, evinfo ) ) {
			critical ( "Cannot re-queue \"%s\" to be synced", fpath );
			return FALSE;
		}
		return TRUE;
	}
	return FALSE;
}
  2045. int sync_idle_dosync_collectedevents_cleanup ( ctx_t *ctx_p, thread_callbackfunct_arg_t *arg_p )
  2046. {
  2047. int ret0 = 0, ret1 = 0;
  2048. if ( ctx_p->flags[DONTUNLINK] )
  2049. return 0;
  2050. debug ( 3, "(ctx_p, {inc: %p, exc: %p}) thread %p", arg_p->incfpath, arg_p->excfpath, pthread_self() );
  2051. if ( arg_p->excfpath != NULL ) {
  2052. debug ( 3, "unlink()-ing exclude-file: \"%s\"", arg_p->excfpath );
  2053. ret0 = unlink ( arg_p->excfpath );
  2054. free ( arg_p->excfpath );
  2055. }
  2056. if ( arg_p->incfpath != NULL ) {
  2057. debug ( 3, "unlink()-ing include-file: \"%s\"", arg_p->incfpath );
  2058. ret1 = unlink ( arg_p->incfpath );
  2059. free ( arg_p->incfpath );
  2060. }
  2061. free ( arg_p );
  2062. return ret0 ? ret0 : ret1;
  2063. }
  2064. void sync_queuesync_wrapper ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
  2065. {
  2066. char *fpath_rel = ( char * ) fpath_gp;
  2067. eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
  2068. ctx_t *ctx_p = ( ( struct dosync_arg * ) arg_gp )->ctx_p;
  2069. indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
  2070. sync_queuesync ( fpath_rel, evinfo, ctx_p, indexes_p, QUEUE_AUTO );
  2071. return;
  2072. }
  2073. int sync_prequeue_unload ( ctx_t *ctx_p, indexes_t *indexes_p )
  2074. {
  2075. struct dosync_arg dosync_arg;
  2076. dosync_arg.ctx_p = ctx_p;
  2077. dosync_arg.indexes_p = indexes_p;
  2078. debug ( 3, "collected %i events per this time.", g_hash_table_size ( indexes_p->fpath2ei_ht ) );
  2079. g_hash_table_foreach ( indexes_p->fpath2ei_ht, sync_queuesync_wrapper, &dosync_arg );
  2080. g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
  2081. return 0;
  2082. }
// Flushes one collection queue if its collect delay has expired (or a flush is
// forced by state/flags). QUEUE_LOCKWAIT entries are retried via
// sync_trylocked(); all other queues are folded into the fpath2ei index (and,
// unless rsync "prefer-include" mode is on, the exclude index).
// Returns 0 (including the "nothing to do" cases).
int sync_idle_dosync_collectedevents_aggrqueue ( queue_id_t queue_id, ctx_t *ctx_p, indexes_t *indexes_p, struct dosync_arg *dosync_arg )
{
	time_t tm = time ( NULL );
	queueinfo_t *queueinfo = &ctx_p->_queues[queue_id];
	// Too early: the queue's collect delay has not expired yet
	if ( ( ctx_p->state == STATE_RUNNING ) && ( queueinfo->stime + queueinfo->collectdelay > tm ) && ( queueinfo->collectdelay != COLLECTDELAY_INSTANT ) && ( !ctx_p->flags[EXITONNOEVENTS] ) ) {
		debug ( 3, "(%i, ...): too early (%i + %i > %i).", queue_id, queueinfo->stime, queueinfo->collectdelay, tm );
		return 0;
	}
	queueinfo->stime = 0;	// mark the queue as drained
	int evcount_real = g_hash_table_size ( indexes_p->fpath2ei_coll_ht[queue_id] );
	debug ( 3, "(%i, ...): evcount_real == %i", queue_id, evcount_real );
	if ( evcount_real <= 0 ) {
		debug ( 3, "(%i, ...): no events, return 0.", queue_id );
		return 0;
	}
	switch ( queue_id ) {
		case QUEUE_LOCKWAIT: {
				// Retry previously locked paths; successfully re-queued ones
				// are removed from the lock-wait table by the GHRFunc
				struct trylocked_arg arg_data = {0};
				dosync_arg->data = &arg_data;
				g_hash_table_foreach_remove ( indexes_p->fpath2ei_coll_ht[queue_id], sync_trylocked, dosync_arg );
				// Placing to global queues recently unlocked objects
				sync_prequeue_unload ( ctx_p, indexes_p );
#ifdef PARANOID
				if ( arg_data.path_full != NULL )
#endif
					free ( arg_data.path_full );
				break;
			}
		default: {
				// Merge this queue's events (and excludes) into the indexes
				g_hash_table_foreach ( indexes_p->fpath2ei_coll_ht[queue_id], _sync_idle_dosync_collectedevents, dosync_arg );
				g_hash_table_remove_all ( indexes_p->fpath2ei_coll_ht[queue_id] );
				if ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) {
					g_hash_table_foreach ( indexes_p->exc_fpath_coll_ht[queue_id], _sync_idle_dosync_collectedexcludes, dosync_arg );
					g_hash_table_remove_all ( indexes_p->exc_fpath_coll_ht[queue_id] );
				}
				break;
			}
	}
	return 0;
}
  2123. int sync_idle_dosync_collectedevents_uniqfname ( ctx_t *ctx_p, char *fpath, char *name )
  2124. {
  2125. pid_t pid = getpid();
  2126. time_t tm = time ( NULL );
  2127. stat64_t stat64;
  2128. int counter = 0;
  2129. do {
  2130. snprintf ( fpath, PATH_MAX, "%s/.clsync-%s.%u.%lu.%lu.%u", ctx_p->listoutdir, name, pid, ( long ) pthread_self(), ( unsigned long ) tm, rand() ); // To be unique
  2131. lstat64 ( fpath, &stat64 );
  2132. if ( counter++ > COUNTER_LIMIT ) {
  2133. error ( "Cannot find unused filename for list-file. The last try was \"%s\".", fpath );
  2134. return ENOENT;
  2135. }
  2136. } while ( errno != ENOENT ); // TODO: find another way to check if the object exists
  2137. errno = 0;
  2138. return 0;
  2139. }
  2140. int sync_idle_dosync_collectedevents_listcreate ( struct dosync_arg *dosync_arg_p, char *name )
  2141. {
  2142. debug ( 3, "Creating %s file", name );
  2143. char *fpath = dosync_arg_p->outf_path;
  2144. ctx_t *ctx_p = dosync_arg_p->ctx_p;
  2145. int ret;
  2146. if ( ( ret = sync_idle_dosync_collectedevents_uniqfname ( ctx_p, fpath, name ) ) ) {
  2147. error ( "sync_idle_dosync_collectedevents_listcreate: Cannot get unique file name." );
  2148. return ret;
  2149. }
  2150. dosync_arg_p->outf = fopen ( fpath, "w" );
  2151. if ( dosync_arg_p->outf == NULL ) {
  2152. error ( "Cannot open \"%s\" as file for writing.", fpath );
  2153. return errno;
  2154. }
  2155. setbuffer ( dosync_arg_p->outf, dosync_arg_p->buf, BUFSIZ );
  2156. debug ( 3, "Created list-file \"%s\"", fpath );
  2157. dosync_arg_p->linescount = 0;
  2158. return 0;
  2159. }
  2160. size_t rsync_escape_result_size = 0;
  2161. char *rsync_escape_result = NULL;
  2162. void rsync_escape_cleanup()
  2163. {
  2164. if ( rsync_escape_result_size )
  2165. free ( rsync_escape_result );
  2166. }
// Escapes rsync pattern metacharacters ('[', ']', '*', '?', '\\') in path by
// prefixing each with a backslash. Returns path itself when nothing needs
// escaping; otherwise returns the shared static buffer rsync_escape_result,
// which is overwritten by the next call — not thread-safe.
const char *rsync_escape ( const char *path )
{
	size_t sc_count = 0;	// number of characters needing a '\' prefix
	size_t i = 0;
	// First pass: count special characters and find the string length
	while ( 1 ) {
		switch ( path[i] ) {
			case 0:
				goto l_rsync_escape_loop0_end;
			case '[':
			case ']':
			case '*':
			case '?':
			case '\\':
				sc_count++;
		}
		i++;
	};
l_rsync_escape_loop0_end:
	if ( !sc_count )
		return path;	// nothing to escape
	// Grow the shared result buffer if required (i == strlen(path) here)
	size_t required_size = i + sc_count + 1;
	if ( required_size >= rsync_escape_result_size ) {
		rsync_escape_result_size = required_size + ALLOC_PORTION;
		rsync_escape_result = xrealloc ( rsync_escape_result, rsync_escape_result_size );
	}
	// TODO: Optimize this. Second "switch" is a bad way.
	// Second pass, backwards (including the terminating NUL): copy each char
	// to its shifted slot and drop a '\' in front of each special char;
	// sc_count shrinks as backslashes are inserted, so the shift stays right.
	i++;
	while ( i-- ) {
		rsync_escape_result[i + sc_count] = path[i];
		switch ( path[i] ) {
			case '[':
			case ']':
			case '*':
			case '?':
			case '\\':
				sc_count--;
				rsync_escape_result[i + sc_count] = '\\';
				break;
		}
	}
	return rsync_escape_result;
}
// Writes one include-line into the rsync list-file, appending the rsync
// wildcard suffix that matches the event's recursion flags:
//   EVIF_RECURSIVELY        -> "path/***"  (dir itself + everything below)
//   EVIF_CONTENTRECURSIVELY -> "path/**"   (contents only)
//   otherwise               -> "path"      (the single object)
// Always returns 0; write failures are fatal via critical_on().
static inline int rsync_outline ( FILE *outf, char *outline, eventinfo_flags_t flags )
{
#ifdef VERYPARANOID
	critical_on ( outf == NULL );
#endif
	// doxygen recognizes / * * as start of a doxy comment, so trick it with concatenated strings
	if ( flags & EVIF_RECURSIVELY ) {
		debug ( 3, "Recursively \"%s\": Writing to rsynclist: \"%s/*""**\".", outline, outline );
		critical_on ( fprintf ( outf, "%s/*""**\n", outline ) <= 0 );
	} else if ( flags & EVIF_CONTENTRECURSIVELY ) {
		debug ( 3, "Content-recursively \"%s\": Writing to rsynclist: \"%s/*""*\".", outline, outline );
		critical_on ( fprintf ( outf, "%s/*""*\n", outline ) <= 0 );
	} else {
		debug ( 3, "Non-recursively \"%s\": Writing to rsynclist: \"%s\".", outline, outline );
		critical_on ( fprintf ( outf, "%s\n", outline ) <= 0 );
	}
	return 0;
}
  2227. gboolean rsync_aggrout ( gpointer outline_gp, gpointer flags_gp, gpointer arg_gp )
  2228. {
  2229. struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
  2230. char *outline = ( char * ) outline_gp;
  2231. FILE *outf = dosync_arg_p->outf;
  2232. eventinfo_flags_t flags = ( eventinfo_flags_t ) GPOINTER_TO_INT ( flags_gp );
  2233. // debug(3, "\"%s\"", outline);
  2234. int ret;
  2235. if ( ( ret = rsync_outline ( outf, outline, flags ) ) ) {
  2236. error ( "Got error from rsync_outline(). Exit." );
  2237. exit ( ret ); // TODO: replace this with kill(0, ...)
  2238. }
  2239. return TRUE;
  2240. }
// Adds fpath (slash-prefixed and rsync-escaped) to the out-aggregation index
// with the given flags, then walks up the path adding every parent directory
// with EVIF_NONE so rsync's include rules can descend to the target.
// linescount_p (may be NULL) is incremented once per line added.
// Always returns 0.
static inline int rsync_listpush ( indexes_t *indexes_p, const char *fpath, size_t fpath_len, eventinfo_flags_t flags, unsigned int *linescount_p )
{
	char *fpathwslash;
	if ( fpath_len > 0 ) {
		// Prepending with the slash
		fpathwslash = alloca ( fpath_len + 2 );
		fpathwslash[0] = '/';
		memcpy ( &fpathwslash[1], fpath, fpath_len + 1 );
	} else {
		// In this case slash is not required
		fpathwslash = ( char * ) fpath;
	}
	// NOTE(review): rsync_escape() may return its shared static buffer; the
	// truncation below (*end = 0) then mutates that buffer — acceptable since
	// it is consumed before the next rsync_escape() call.
	fpathwslash = ( char * ) rsync_escape ( fpathwslash );
	char *end = fpathwslash;
	debug ( 3, "\"%s\": Adding to rsynclist: \"%s\" with flags %p.",
	        fpathwslash, fpathwslash, ( void * ) ( long ) flags );
	indexes_outaggr_add ( indexes_p, strdup ( fpathwslash ), flags );
	if ( linescount_p != NULL )
		( *linescount_p )++;
	// Add each ancestor by repeatedly truncating at the last '/'
	while ( end != NULL ) {
		if ( *fpathwslash == 0x00 )
			break;
		debug ( 3, "Non-recursively \"%s\": Adding to rsynclist: \"%s\".", fpathwslash, fpathwslash );
		indexes_outaggr_add ( indexes_p, strdup ( fpathwslash ), EVIF_NONE );
		if ( linescount_p != NULL )
			( *linescount_p )++;
		end = strrchr ( fpathwslash, '/' );
		if ( end == NULL )
			break;
		if ( end - fpathwslash <= 0 )
			break;
		*end = 0x00;
	};
	return 0;
}
// GHRFunc over the exclude-paths table: writes one exclude line
// (slash-prefixed, rsync-escaped, with the flags' recursion suffix) into the
// exclude list-file and removes the entry (returns TRUE).
// A write error is fatal.
gboolean sync_idle_dosync_collectedevents_rsync_exclistpush ( gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp )
{
	struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
	char *fpath = ( char * ) fpath_gp;
	FILE *excf = dosync_arg_p->outf;	// the exclude list-file is in ->outf at this stage
	eventinfo_flags_t flags = GPOINTER_TO_INT ( flags_gp );
	// ctx_t *ctx_p = dosync_arg_p->ctx_p;
	// indexes_t *indexes_p = dosync_arg_p->indexes_p;
	debug ( 3, "\"%s\"", fpath );
	size_t fpath_len = strlen ( fpath );
	char *fpathwslash;
	if ( fpath_len > 0 ) {
		// Prepending with the slash
		fpathwslash = alloca ( fpath_len + 2 );
		fpathwslash[0] = '/';
		memcpy ( &fpathwslash[1], fpath, fpath_len + 1 );
	} else {
		// In this case slash is not required
		fpathwslash = fpath;
	}
	fpathwslash = ( char * ) rsync_escape ( fpathwslash );
	int ret;
	if ( ( ret = rsync_outline ( excf, fpathwslash, flags ) ) ) {
		error ( "Got error from rsync_outline(). Exit." );
		exit ( ret ); // TODO: replace this with kill(0, ...)
	}
	return TRUE;
}
// Commits the currently collected "part": flushes the aggregated rsync lines,
// closes the list-file, and dispatches the sync — either through a loaded
// shared object (MODE_SO / MODE_RSYNCSO) or by executing the external sync
// handler with the prepared argv. Returns the handler's result (0 if there
// were no events to commit).
int sync_idle_dosync_collectedevents_commitpart ( struct dosync_arg *dosync_arg_p )
{
	ctx_t *ctx_p = dosync_arg_p->ctx_p;
	indexes_t *indexes_p = dosync_arg_p->indexes_p;
	debug ( 3, "Committing the file (flags[MODE] == %i)", ctx_p->flags[MODE] );
	// rsync modes: drain the aggregated include lines into the list-file
	if (
	    ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
	    ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
	    ( ctx_p->flags[MODE] == MODE_RSYNCSO )
	)
		g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, dosync_arg_p );
	if ( dosync_arg_p->outf != NULL ) {
		critical_on ( fclose ( dosync_arg_p->outf ) );	// also flushes the buffer
		dosync_arg_p->outf = NULL;
	}
	if ( dosync_arg_p->evcount > 0 ) {
		thread_callbackfunct_arg_t *callback_arg_p;
		debug ( 3, "%s [%s] (%p) -> %s [%s]", ctx_p->watchdir, ctx_p->watchdirwslash, ctx_p->watchdirwslash,
		        ctx_p->destdir ? ctx_p->destdir : "", ctx_p->destdirwslash ? ctx_p->destdirwslash : "" );
		// so-module generic sync handler
		if ( ctx_p->flags[MODE] == MODE_SO ) {
			api_eventinfo_t *ei = dosync_arg_p->api_ei;
			return so_call_sync ( ctx_p, indexes_p, dosync_arg_p->evcount, ei );
		}
		// so-module rsync handler (exclude-list path passed only if non-empty)
		if ( ctx_p->flags[MODE] == MODE_RSYNCSO )
			return so_call_rsync (
			           ctx_p,
			           indexes_p,
			           dosync_arg_p->outf_path,
			           * ( dosync_arg_p->excf_path ) ? dosync_arg_p->excf_path : NULL );
		// External handler: prepare the cleanup-callback argument with the
		// list-file paths that must be unlink()-ed after the run
		callback_arg_p = xcalloc ( 1, sizeof ( *callback_arg_p ) );
		if ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST_PATH )
			callback_arg_p->incfpath = strdup ( dosync_arg_p->outf_path );
		if ( ctx_p->synchandler_argf & SHFL_EXCLUDE_LIST_PATH )
			callback_arg_p->excfpath = strdup ( dosync_arg_p->excf_path );
		{
			int rc;
			dosync_arg_p->list_type_str =
			    ctx_p->flags[MODE] == MODE_RSYNCDIRECT ||
			    ctx_p->flags[MODE] == MODE_RSYNCSHELL
			    ? "rsynclist" : "synclist";
			debug ( 9, "dosync_arg_p->include_list_count == %u", dosync_arg_p->include_list_count );
			char **argv = sync_customargv ( ctx_p, dosync_arg_p, &ctx_p->synchandler_args[SHARGS_PRIMARY] );
			// NOTE(review): assumes sync_customargv() copied/consumed the
			// include-list strings — confirm before touching this.
			while ( dosync_arg_p->include_list_count )
				free ( ( char * ) dosync_arg_p->include_list[--dosync_arg_p->include_list_count] );
			rc = SYNC_EXEC_ARGV (
			         ctx_p,
			         indexes_p,
			         sync_idle_dosync_collectedevents_cleanup,
			         callback_arg_p,
			         argv );
#ifdef THREADING_SUPPORT
			if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
#endif
				argv_free ( argv );
			return rc;
		}
	}
	return 0;
#ifdef DOXYGEN
	// unreachable; present only so doxygen cross-links these functions
	sync_exec_argv ( NULL, NULL );
	sync_exec_argv_thread ( NULL, NULL );
#endif
}
  2367. void sync_inclist_rotate ( ctx_t *ctx_p, struct dosync_arg *dosync_arg_p )
  2368. {
  2369. int ret;
  2370. char newexc_path[PATH_MAX + 1];
  2371. if ( ctx_p->synchandler_argf & SHFL_EXCLUDE_LIST_PATH ) {
  2372. // TODO: optimize this out {
  2373. if ( ( ret = sync_idle_dosync_collectedevents_uniqfname ( ctx_p, newexc_path, "exclist" ) ) ) {
  2374. error ( "Cannot get unique file name." );
  2375. exit ( ret );
  2376. }
  2377. if ( ( ret = fileutils_copy ( dosync_arg_p->excf_path, newexc_path ) ) ) {
  2378. error ( "Cannot copy file \"%s\" to \"%s\".", dosync_arg_p->excf_path, newexc_path );
  2379. exit ( ret );
  2380. }
  2381. // }
  2382. // That's required to copy excludes' list file for every rsync execution.
  2383. // The problem appears do to unlink()-ing the excludes' list file on callback function
  2384. // "sync_idle_dosync_collectedevents_cleanup()" of every execution.
  2385. }
  2386. if ( ( ret = sync_idle_dosync_collectedevents_commitpart ( dosync_arg_p ) ) ) {
  2387. error ( "Cannot commit list-file \"%s\"", dosync_arg_p->outf_path );
  2388. exit ( ret ); // TODO: replace with kill(0, ...);
  2389. }
  2390. if ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST_PATH ) {
  2391. #ifdef VERYPARANOID
  2392. require_strlen_le ( newexc_path, PATH_MAX );
  2393. #endif
  2394. strcpy ( dosync_arg_p->excf_path, newexc_path ); // TODO: optimize this out
  2395. if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( dosync_arg_p, "list" ) ) ) {
  2396. error ( "Cannot create new list-file" );
  2397. exit ( ret ); // TODO: replace with kill(0, ...);
  2398. }
  2399. }
  2400. return;
  2401. }
// GHFunc over the fpath2ei index: emits one collected path into the current
// sync "part" — an api_eventinfo record (so-mode), the include argv array,
// and/or the list-file — rotating the part when argv or line limits are hit.
void sync_idle_dosync_collectedevents_listpush ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
{
	struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
	char *fpath = ( char * ) fpath_gp;
	eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
	//int *evcount_p =&dosync_arg_p->evcount;
	FILE *outf = dosync_arg_p->outf;
	ctx_t *ctx_p = dosync_arg_p->ctx_p;
	unsigned int *linescount_p = &dosync_arg_p->linescount;
	indexes_t *indexes_p = dosync_arg_p->indexes_p;
	api_eventinfo_t **api_ei_p = &dosync_arg_p->api_ei;
	int *api_ei_count_p = &dosync_arg_p->api_ei_count;
	debug ( 3, "\"%s\" with int-flags %p. "
	        "evinfo: seqid_min == %u, seqid_max == %u type_o == %i, type_n == %i",
	        fpath, ( void * ) ( unsigned long ) evinfo->flags,
	        evinfo->seqid_min, evinfo->seqid_max,
	        evinfo->objtype_old, evinfo->objtype_new
	      );
	// so-module case: fill the next api_eventinfo_t slot and return
	if ( ctx_p->flags[MODE] == MODE_SO ) {
		api_eventinfo_t *ei = & ( *api_ei_p ) [ ( *api_ei_count_p )++];
		ei->evmask = evinfo->evmask;
		ei->flags = evinfo->flags;
		ei->objtype_old = evinfo->objtype_old;
		ei->objtype_new = evinfo->objtype_new;
		ei->path_len = strlen ( fpath );
		ei->path = strdup ( fpath );
		return;
	}
	// Paths passed directly as handler arguments: collect them, rotating
	// (committing the part) before exceeding the handler's argv capacity
	if ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST ) {
		dosync_arg_p->include_list[dosync_arg_p->include_list_count++] = strdup ( fpath );
		if (
		    dosync_arg_p->include_list_count >= ( size_t )
		    ( MAXARGUMENTS -
		      MAX (
		          ctx_p->synchandler_args[SHARGS_PRIMARY].c,
		          ctx_p->synchandler_args[SHARGS_INITIAL].c
		      )
		    )
		)
			sync_inclist_rotate ( ctx_p, dosync_arg_p );
	}
	// Finish if we don't use list files
	if ( ! ( ctx_p->synchandler_argf &
	         ( SHFL_INCLUDE_LIST_PATH | SHFL_EXCLUDE_LIST_PATH ) ) )
		return;
	// List files cases:
	// non-RSYNC case: write a plain line (or a "sync <label> <mask> <path>"
	// record) per event, depending on --synclist-simplify
	if ( ! (
	         ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
	         ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
	         ( ctx_p->flags[MODE] == MODE_RSYNCSO )
	     ) ) {
		if ( ctx_p->flags[SYNCLISTSIMPLIFY] ) {
			critical_on ( fprintf ( outf, "%s\n", fpath ) <= 0 );
		} else {
			critical_on ( fprintf ( outf, "sync %s %i %s\n", ctx_p->label, evinfo->evmask, fpath ) <= 0 );
		}
		return;
	}
	// RSYNC case: rotate the list-file if the include-line limit is reached,
	// then push the path (and its parent dirs) into the aggregation index
	if ( ctx_p->rsyncinclimit && ( *linescount_p >= ctx_p->rsyncinclimit ) )
		sync_inclist_rotate ( ctx_p, dosync_arg_p );
	int ret;
	if ( ( ret = rsync_listpush ( indexes_p, fpath, strlen ( fpath ), evinfo->flags, linescount_p ) ) ) {
		error ( "Got error from rsync_listpush(). Exit." );
		exit ( ret );
	}
	return;
}
// Top-level "flush everything collected" routine, called from sync_idle():
// drains every expired collection queue into the fpath2ei/exclude indexes,
// then — if anything was collected — creates the exclude and include
// list-files (or the api_eventinfo array in so-mode), pushes every event
// into them and commits the part. Returns 0 on success.
int sync_idle_dosync_collectedevents ( ctx_t *ctx_p, indexes_t *indexes_p )
{
	debug ( 3, "" );
	struct dosync_arg dosync_arg = {0};
	dosync_arg.ctx_p = ctx_p;
	dosync_arg.indexes_p = indexes_p;
	// Exclude-list files are used only in rsync modes without
	// --rsync-prefer-include
	char isrsyncpreferexclude =
	    (
	        ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSO )
	    ) && ( !ctx_p->flags[RSYNCPREFERINCLUDE] );
#ifdef PARANOID
	// Defensive: the working indexes should already be empty here
	if ( ctx_p->listoutdir != NULL ) {
		g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
		if ( isrsyncpreferexclude )
			g_hash_table_remove_all ( indexes_p->exc_fpath_ht );
	}
#endif
	// Setting the time to sync not before it:
	ctx_p->synctime = time ( NULL ) + ctx_p->syncdelay;
	debug ( 3, "Next sync will be not before: %u", ctx_p->synctime );
	// Drain each collection queue (QUEUE_LOCKWAIT only in safe-threading mode)
	int queue_id = 0;
	while ( queue_id < QUEUE_MAX ) {
		int ret;
		if ( ( queue_id == QUEUE_LOCKWAIT ) && ( ctx_p->flags[THREADING] != PM_SAFE ) ) {
			queue_id++;
			continue;
		}
		// The queue id is smuggled to the GHFunc through dosync_arg.data
		queue_id_t *queue_id_p = ( queue_id_t * ) &dosync_arg.data;
		*queue_id_p = queue_id;
		ret = sync_idle_dosync_collectedevents_aggrqueue ( queue_id, ctx_p, indexes_p, &dosync_arg );
		if ( ret ) {
			error ( "Got error while processing queue #%i\n.", queue_id );
			g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
			if ( isrsyncpreferexclude )
				g_hash_table_remove_all ( indexes_p->exc_fpath_ht );
			return ret;
		}
		queue_id++;
	}
	if ( !dosync_arg.evcount ) {
		debug ( 3, "Summary events' count is zero. Return 0." );
		return 0;
	}
	// so-mode: pre-allocate one api_eventinfo_t per collected event
	if ( ctx_p->flags[MODE] == MODE_SO ) {
		//dosync_arg.evcount = g_hash_table_size(indexes_p->fpath2ei_ht);
		debug ( 3, "There's %i events. Processing.", dosync_arg.evcount );
		dosync_arg.api_ei = ( api_eventinfo_t * ) xmalloc ( dosync_arg.evcount * sizeof ( *dosync_arg.api_ei ) );
	}
	{
		int ret;
		if ( ( ctx_p->listoutdir != NULL ) || ( ctx_p->flags[MODE] == MODE_SO ) ) {
			if ( ! ( ctx_p->flags[MODE] == MODE_SO ) ) {
				* ( dosync_arg.excf_path ) = 0x00;
				// Write the exclude list-file first (rsync modes without
				// --rsync-prefer-include)
				if ( isrsyncpreferexclude ) {
					if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( &dosync_arg, "exclist" ) ) ) {
						error ( "Cannot create list-file" );
						return ret;
					}
#ifdef PARANOID
					g_hash_table_remove_all ( indexes_p->out_lines_aggr_ht );
#endif
					g_hash_table_foreach_remove ( indexes_p->exc_fpath_ht, sync_idle_dosync_collectedevents_rsync_exclistpush, &dosync_arg );
					g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, &dosync_arg );
					critical_on ( fclose ( dosync_arg.outf ) );
#ifdef VERYPARANOID
					require_strlen_le ( dosync_arg.outf_path, PATH_MAX );
#endif
					strcpy ( dosync_arg.excf_path, dosync_arg.outf_path ); // TODO: remove this strcpy()
				}
				// Now the include list-file
				if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( &dosync_arg, "list" ) ) ) {
					error ( "Cannot create list-file" );
					return ret;
				}
			}
		}
		// Push every collected event into the part and commit it
		if ( ( ctx_p->listoutdir != NULL ) || ( ctx_p->flags[MODE] == MODE_SO ) || ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST ) ) {
#ifdef PARANOID
			g_hash_table_remove_all ( indexes_p->out_lines_aggr_ht );
#endif
			g_hash_table_foreach ( indexes_p->fpath2ei_ht, sync_idle_dosync_collectedevents_listpush, &dosync_arg );
			if ( ( ret = sync_idle_dosync_collectedevents_commitpart ( &dosync_arg ) ) ) {
				error ( "Cannot submit to sync the list \"%s\"", dosync_arg.outf_path );
				// TODO: free dosync_arg.api_ei on case of error
				g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
				return ret;
			}
			g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
		}
	}
	finish_iteration ( ctx_p );
	return 0;
}
  2566. int apievinfo2rsynclist ( indexes_t *indexes_p, FILE *listfile, int n, api_eventinfo_t *apievinfo )
  2567. {
  2568. int i;
  2569. if ( listfile == NULL ) {
  2570. error ( "listfile == NULL." );
  2571. return EINVAL;
  2572. }
  2573. i = 0;
  2574. while ( i < n ) {
  2575. rsync_listpush ( indexes_p, apievinfo[i].path, apievinfo[i].path_len, apievinfo[i].flags, NULL );
  2576. i++;
  2577. }
  2578. struct dosync_arg dosync_arg = {0};
  2579. dosync_arg.outf = listfile;
  2580. g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, &dosync_arg );
  2581. return 0;
  2582. }
// One idle-cycle step: collects finished threads, honours the standby file
// (holding syncs while it exists), and flushes all collected events —
// under the cluster lock when cluster support is compiled in.
// Returns 0 on success or the first error encountered.
int sync_idle ( ctx_t *ctx_p, indexes_t *indexes_p )
{
	int ret;
	// Collecting garbage
#ifdef THREADING_SUPPORT
	ret = thread_gc ( ctx_p );
	if ( ret ) return ret;
#endif
	// Checking if we can sync
	if ( ctx_p->flags[STANDBYFILE] ) {
		struct stat st;
		// While the standby file exists: hold on, sleep, and retry later
		if ( !stat ( ctx_p->standbyfile, &st ) ) {
			state_t state_old;
			state_old = ctx_p->state;
			ctx_p->state = STATE_HOLDON;
			main_status_update ( ctx_p );
			debug ( 1, "Found standby file. Holding over syncs. Sleeping "XTOSTR ( SLEEP_SECONDS ) " second." );
			sleep ( SLEEP_SECONDS );
			ctx_p->state = state_old;
			main_status_update ( ctx_p );
			return 0;
		}
	}
	// Syncing
	debug ( 3, "calling sync_idle_dosync_collectedevents()" );
#ifdef CLUSTER_SUPPORT
	ret = cluster_lock_byindexes();
	if ( ret ) return ret;
#endif
	ret = sync_idle_dosync_collectedevents ( ctx_p, indexes_p );
	if ( ret ) return ret;
#ifdef CLUSTER_SUPPORT
	ret = cluster_unlock_all();
	if ( ret ) return ret;
#endif
	return 0;
}
// Waits for the next filesystem event (or until the nearest queue/sync/thread
// deadline). Computes the shortest useful timeout from the collect delays of
// all queues, the global synctime and (with threading) the next thread expiry,
// then calls the notify engine's wait() under the SELECT mutex.
// Returns the engine's result (0 can mean timeout); may switch the state to
// STATE_PREEXIT/STATE_EXIT when --exit-on-no-events fires.
int notify_wait ( ctx_t *ctx_p, indexes_t *indexes_p )
{
	static struct timeval tv;
	time_t tm = time ( NULL );
	long delay = ( ( unsigned long ) ~0 >> 1 );	// LONG_MAX: "no deadline yet"
	threadsinfo_t *threadsinfo_p = thread_info();
	debug ( 4, "pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
	pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
	pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
	// Find the earliest queue deadline; an instant queue means "don't wait"
	long queue_id = 0;
	while ( queue_id < QUEUE_MAX ) {
		queueinfo_t *queueinfo = &ctx_p->_queues[queue_id++];
		if ( !queueinfo->stime )
			continue;
		if ( queueinfo->collectdelay == COLLECTDELAY_INSTANT ) {
			debug ( 3, "There're events in instant queue (#%i), don't waiting.", queue_id - 1 );
			return 0;
		}
		int qdelay = queueinfo->stime + queueinfo->collectdelay - tm;
		debug ( 3, "queue #%i: %i %i %i -> %i", queue_id - 1, queueinfo->stime, queueinfo->collectdelay, tm, qdelay );
		// Clamp an overdue queue to at most syncdelay in the past
		if ( qdelay < - ( long ) ctx_p->syncdelay )
			qdelay = - ( long ) ctx_p->syncdelay;
		delay = MIN ( delay, qdelay );
	}
	// Never wake up before ctx_p->synctime
	long synctime_delay = ( ( long ) ctx_p->synctime ) - ( ( long ) tm );
	synctime_delay = synctime_delay > 0 ? synctime_delay : 0;
	debug ( 3, "delay = MAX(%li, %li)", delay, synctime_delay );
	delay = MAX ( delay, synctime_delay );
	delay = delay > 0 ? delay : 0;
#ifdef THREADING_SUPPORT
	// ...but wake up in time to expire finished sync threads
	if ( ctx_p->flags[THREADING] ) {
		time_t _thread_nextexpiretime = thread_nextexpiretime();
		debug ( 3, "thread_nextexpiretime == %i", _thread_nextexpiretime );
		if ( _thread_nextexpiretime ) {
			long thread_expiredelay = ( long ) thread_nextexpiretime() - ( long ) tm + 1; // +1 is to make "tm>threadinfo_p->expiretime" after select() definitely TRUE
			debug ( 3, "thread_expiredelay == %i", thread_expiredelay );
			thread_expiredelay = thread_expiredelay > 0 ? thread_expiredelay : 0;
			debug ( 3, "delay = MIN(%li, %li)", delay, thread_expiredelay );
			delay = MIN ( delay, thread_expiredelay );
		}
	}
#endif
	// Nothing to wait for, or we're no longer in a waiting-capable state
	if ( ( !delay ) || ( ctx_p->state != STATE_RUNNING && ctx_p->state != STATE_LASTSYNC ) ) {
		debug ( 4, "forcing: no events (delay is %li, state is %s)",
		        delay, status_descr[ctx_p->state] )
		return 0;
	}
	if ( ctx_p->flags[EXITONNOEVENTS] || ctx_p->state == STATE_LASTSYNC ) {
		// zero delay if "--exit-on-no-events" is set or if it is the last sync (see "--sync-on-quit")
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		// Sleep part of the delay outside the engine's wait()
		debug ( 3, "sleeping for %li second(s).", SLEEP_SECONDS );
		sleep ( SLEEP_SECONDS );
		delay = ( ( long ) delay ) > SLEEP_SECONDS ? delay - SLEEP_SECONDS : 0;
		tv.tv_sec = delay;
		tv.tv_usec = 0;
	}
	// Re-check the state under the STATE mutex, then wait for events while
	// holding the SELECT mutex (so state changes can interrupt us)
	debug ( 4, "pthread_mutex_lock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
	pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
	if ( ctx_p->state != STATE_RUNNING && ctx_p->state != STATE_LASTSYNC )
		return 0;
	debug ( 4, "pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
	pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
	pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT] );
	pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
	debug ( 8, "ctx_p->notifyenginefunct.wait() [%p]", ctx_p->notifyenginefunct.wait );
	int ret = ctx_p->notifyenginefunct.wait ( ctx_p, indexes_p, &tv );
	pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT] );
	// A signal interruption is not an error
	if ( ( ret == -1 ) && ( errno == EINTR ) ) {
		errno = 0;
		ret = 0;
	}
	debug ( 4, "pthread_mutex_lock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
	pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
	if ( ( ctx_p->flags[EXITONNOEVENTS] ) && ( ret == 0 ) && ( ctx_p->state != STATE_LASTSYNC ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
		// if not events and "--exit-on-no-events" is set
		if ( ctx_p->flags[PREEXITHOOK] || ctx_p->flags[SOFTEXITSYNC] )
			ctx_p->state = STATE_PREEXIT;
		else
			ctx_p->state = STATE_EXIT;
	}
	return ret;
}
/* Runs one sync_idle() round; on failure logs and does a "return" FROM THE
 * ENCLOSING FUNCTION. Expects "ctx_p" and "indexes_p" in the caller's scope. */
#define SYNC_LOOP_IDLE {\
int ret;\
if((ret=sync_idle(ctx_p, indexes_p))) {\
error("got error while sync_idle().");\
return ret;\
}\
}
/* Wakes up threads waiting on the STATE condvar, releases the STATE mutex and
 * restarts the ENCLOSING while-loop ("continue" of the caller). Expects
 * "threadsinfo_p" in the caller's scope and the STATE mutex held. */
#define SYNC_LOOP_CONTINUE_UNLOCK {\
pthread_cond_broadcast(&threadsinfo_p->cond[PTHREAD_MUTEX_STATE]);\
debug(4, "pthread_mutex_unlock()");\
pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE]);\
continue;\
}
  2717. void hook_preexit ( ctx_t *ctx_p )
  2718. {
  2719. debug ( 2, "\"%s\" \"%s\"", ctx_p->preexithookfile, ctx_p->label );
  2720. #ifdef VERYPARANOID
  2721. if ( ctx_p->preexithookfile == NULL )
  2722. critical ( "ctx_p->preexithookfile == NULL" );
  2723. #endif
  2724. char *argv[] = { ctx_p->preexithookfile, ctx_p->label, NULL};
  2725. exec_argv ( argv, NULL );
  2726. return;
  2727. }
  2728. int sync_loop ( ctx_t *ctx_p, indexes_t *indexes_p )
  2729. {
  2730. int ret;
  2731. threadsinfo_t *threadsinfo_p = thread_info();
  2732. state_p = &ctx_p->state;
  2733. ctx_p->state = ctx_p->flags[SKIPINITSYNC] ? STATE_RUNNING : STATE_INITSYNC;
  2734. while ( ctx_p->state != STATE_EXIT ) {
  2735. int events;
  2736. debug ( 4, "pthread_mutex_lock(): PTHREAD_MUTEX_STATE" );
  2737. pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
  2738. debug ( 3, "current state is \"%s\" (%i) (iteration: %u/%u); threadsinfo_p->used == %u",
  2739. status_descr[ctx_p->state], ctx_p->state,
  2740. ctx_p->iteration_num, ctx_p->flags[MAXITERATIONS], threadsinfo_p->used );
  2741. while ( ( ctx_p->flags[THREADING] == PM_OFF ) && threadsinfo_p->used ) {
  2742. debug ( 1, "We are in non-threading mode but have %u syncer threads. Waiting for them end.", threadsinfo_p->used );
  2743. pthread_cond_wait ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE], &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
  2744. pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
  2745. }
  2746. events = 0;
  2747. switch ( ctx_p->state ) {
  2748. case STATE_THREAD_GC:
  2749. main_status_update ( ctx_p );
  2750. #ifdef THREADING_SUPPORT
  2751. if ( thread_gc ( ctx_p ) ) {
  2752. ctx_p->state = STATE_EXIT;
  2753. break;
  2754. }
  2755. #endif
  2756. ctx_p->state = STATE_RUNNING;
  2757. SYNC_LOOP_CONTINUE_UNLOCK;
  2758. case STATE_INITSYNC:
  2759. if ( !ctx_p->flags[THREADING] ) {
  2760. ctx_p->iteration_num = 0;
  2761. setenv_iteration ( ctx_p->iteration_num );
  2762. }
  2763. main_status_update ( ctx_p );
  2764. pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
  2765. pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
  2766. ret = sync_initialsync ( ctx_p->watchdir, ctx_p, indexes_p, INITSYNC_FULL );
  2767. if ( ret ) return ret;
  2768. if ( ( ctx_p->state == STATE_TERM ) || ( ctx_p->state == STATE_EXIT ) ) {
  2769. continue;
  2770. }
  2771. if ( ctx_p->flags[ONLYINITSYNC] ) {
  2772. SYNC_LOOP_IDLE;
  2773. ctx_p->state = STATE_EXIT;
  2774. return ret;
  2775. }
  2776. ctx_p->state = STATE_RUNNING;
  2777. continue;
  2778. case STATE_RUNNING:
  2779. if ( ( !ctx_p->flags[THREADING] ) && ctx_p->flags[MAXITERATIONS] ) {
  2780. if ( ( typeof ( ctx_p->iteration_num ) ) ctx_p->flags[MAXITERATIONS] == ctx_p->iteration_num - 1 )
  2781. ctx_p->state = STATE_PREEXIT;
  2782. else if ( ( typeof ( ctx_p->iteration_num ) ) ctx_p->flags[MAXITERATIONS] <= ctx_p->iteration_num )
  2783. ctx_p->state = STATE_EXIT;
  2784. }
  2785. case STATE_PREEXIT:
  2786. if ( ctx_p->state == STATE_PREEXIT && ctx_p->flags[PREEXITHOOK] == 0 && ctx_p->flags[SOFTEXITSYNC] == 0 ) {
  2787. ctx_p->state = STATE_TERM;
  2788. SYNC_LOOP_IDLE;
  2789. }
  2790. case STATE_LASTSYNC:
  2791. switch ( ctx_p->state ) {
  2792. case STATE_PREEXIT:
  2793. debug ( 1, "preparing to exit" );
  2794. main_status_update ( ctx_p );
  2795. if ( ctx_p->flags[PREEXITHOOK] )
  2796. hook_preexit ( ctx_p );
  2797. if ( ctx_p->flags[SOFTEXITSYNC] ) {
  2798. ctx_p->state = STATE_LASTSYNC;
  2799. } else {
  2800. ctx_p->state = STATE_TERM;
  2801. }
  2802. break;
  2803. case STATE_LASTSYNC:
  2804. debug ( 3, "notify_wait ( ctx_p, indexes_p ) [lastsync]" );
  2805. events = notify_wait ( ctx_p, indexes_p );
  2806. ctx_p->state = STATE_TERM;
  2807. break;
  2808. case STATE_RUNNING:
  2809. debug ( 3, "notify_wait ( ctx_p, indexes_p )" );
  2810. events = notify_wait ( ctx_p, indexes_p );
  2811. break;
  2812. default:
  2813. SYNC_LOOP_CONTINUE_UNLOCK;
  2814. }
  2815. break;
  2816. case STATE_REHASH:
  2817. main_status_update ( ctx_p );
  2818. debug ( 1, "rehashing." );
  2819. main_rehash ( ctx_p );
  2820. ctx_p->state = STATE_RUNNING;
  2821. SYNC_LOOP_CONTINUE_UNLOCK;
  2822. case STATE_TERM:
  2823. main_status_update ( ctx_p );
  2824. ctx_p->state = STATE_EXIT;
  2825. case STATE_EXIT:
  2826. main_status_update ( ctx_p );
  2827. SYNC_LOOP_CONTINUE_UNLOCK;
  2828. default:
  2829. critical ( "internal error: ctx_p->state == %u", ctx_p->state );
  2830. break;
  2831. }
  2832. pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
  2833. debug ( 4, "pthread_mutex_unlock(): PTHREAD_MUTEX_STATE" );
  2834. pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
  2835. if ( events == 0 ) {
  2836. debug ( 2, "events == 0" );
  2837. SYNC_LOOP_IDLE;
  2838. continue; // Timeout
  2839. }
  2840. if ( events < 0 ) {
  2841. error ( "Got error while waiting for event from notify subsystem." );
  2842. return errno;
  2843. }
  2844. debug ( 3, "ctx_p->notifyenginefunct.handle" );
  2845. int count = ctx_p->notifyenginefunct.handle ( ctx_p, indexes_p );
  2846. if ( count <= 0 ) {
  2847. error ( "Cannot handle with notify events." );
  2848. return errno;
  2849. }
  2850. main_status_update ( ctx_p );
  2851. if ( ctx_p->flags[EXITONNOEVENTS] ) // clsync exits on no events, so sync_idle() is never called. We have to force the calling of it.
  2852. SYNC_LOOP_IDLE;
  2853. }
  2854. debug ( 1, "last SYNC_LOOP_IDLE" );
  2855. SYNC_LOOP_IDLE;
  2856. debug ( 1, "end" );
  2857. return exitcode;
  2858. #ifdef DOXYGEN
  2859. sync_idle ( 0, NULL, NULL );
  2860. #endif
  2861. }
  2862. void sync_sig_int ( int signal )
  2863. {
  2864. debug ( 2, "%i: Thread %p", signal, pthread_self() );
  2865. return;
  2866. }
#ifdef PARANOID
// Counts consecutive force-cycle attempts; used to detect apparent deadlocks
int _sync_tryforcecycle_i;
#endif
/*
 * Interrupts blocking operations (select() etc.) in all registered blocking
 * threads by sending SIGUSR_BLOPINT, then waits briefly so the interrupted
 * thread can release its mutexes.
 *
 * Returns: EINPROGRESS — caller should retry acquiring the mutex;
 *          0           — (SYNC_SWITCHSTATE_COND_TIMEDWAIT builds only) the
 *                        state condvar fired before the timeout;
 *          EDEADLK     — (PARANOID builds only) too many attempts, give up.
 */
int sync_tryforcecycle ( ctx_t *ctx_p, pthread_t pthread_parent )
{
	( void ) pthread_parent;	// unused since the pthread_kill() below was commented out
	debug ( 3, "sending signal to interrupt blocking operations like select()-s and so on (ctx_p->blockthread_count == %i)", ctx_p->blockthread_count );
	//pthread_kill(pthread_parent, SIGUSR_BLOPINT);
	int i, count;
	count = ctx_p->blockthread_count;
	i = 0;
	// Poke every thread registered via register_blockthread()
	while ( i < count ) {
		debug ( 2, "Sending SIGUSR_BLOPINT to thread %p", ctx_p->blockthread[i] );
		pthread_kill ( ctx_p->blockthread[i], SIGUSR_BLOPINT );
		i++;
	}
#ifdef PARANOID
	if ( ++_sync_tryforcecycle_i > KILL_TIMEOUT ) {
		error ( "Seems we got a deadlock." );
		return EDEADLK;
	}
#endif
#ifdef SYNC_SWITCHSTATE_COND_TIMEDWAIT // Hangs
	struct timespec time_timeout;
	clock_gettime ( CLOCK_REALTIME, &time_timeout );
	time_timeout.tv_sec++;
	// time_timeout.tv_sec = now.tv_sec;
	debug ( 3, "pthread_cond_timedwait() until %li.%li", time_timeout.tv_sec, time_timeout.tv_nsec );
	if ( pthread_cond_timedwait ( pthread_cond_state, pthread_mutex_state, &time_timeout ) != ETIMEDOUT )
		return 0;
#else
	debug ( 9, "sleep("XTOSTR ( SLEEP_SECONDS ) ")" );
	sleep ( SLEEP_SECONDS ); // TODO: replace this with pthread_cond_timedwait()
#endif
	return EINPROGRESS;
}
/*
 * Switches the main loop into "newstate" from another thread (the signal
 * handler thread). Acquires the STATE and SELECT mutexes first — forcing
 * blocked threads out of select() via sync_tryforcecycle() when needed — so
 * the main loop observes the state change at a safe point.
 * Returns 0 on success or an error code (e.g. EDEADLK from the force cycle).
 */
int sync_switch_state ( ctx_t *ctx_p, pthread_t pthread_parent, int newstate )
{
	if ( state_p == NULL ) {
		// The main loop has not started yet: nothing to synchronize with
		debug ( 3, "sync_switch_state(ctx_p, %p, %i), but state_p == NULL", pthread_parent, newstate );
		return 0;
	}
	debug ( 3, "sync_switch_state(ctx_p, %p, %i)", pthread_parent, newstate );
	// Getting mutexes
	threadsinfo_t *threadsinfo_p = thread_info();
	if ( threadsinfo_p == NULL ) {
		// If no mutexes, just change the state
		goto l_sync_parent_interrupt_end;
	}
	if ( !threadsinfo_p->mutex_init ) {
		// If no mutexes, just change the state
		goto l_sync_parent_interrupt_end;
	}
	pthread_mutex_t *pthread_mutex_state = &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE];
	pthread_mutex_t *pthread_mutex_select = &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT];
	pthread_cond_t *pthread_cond_state = &threadsinfo_p->cond [PTHREAD_MUTEX_STATE];
	// Locking all necessary mutexes
#ifdef PARANOID
	_sync_tryforcecycle_i = 0;
#endif
	debug ( 4, "while(pthread_mutex_trylock( pthread_mutex_state ))" );
	while ( pthread_mutex_trylock ( pthread_mutex_state ) == EBUSY ) {
		int rc = sync_tryforcecycle ( ctx_p, pthread_parent );
		if ( rc && rc != EINPROGRESS )
			return rc;
		// NOTE(review): rc == 0 (only possible in SYNC_SWITCHSTATE_COND_TIMEDWAIT
		// builds) breaks out without having acquired the mutex — verify intent.
		if ( !rc )
			break;
	}
#ifdef PARANOID
	_sync_tryforcecycle_i = 0;
#endif
	debug ( 4, "while(pthread_mutex_trylock( pthread_mutex_select ))" );
	while ( pthread_mutex_trylock ( pthread_mutex_select ) == EBUSY ) {
		int rc = sync_tryforcecycle ( ctx_p, pthread_parent );
		if ( rc && rc != EINPROGRESS )
			return rc;
		// NOTE(review): same break-without-lock concern as above
		if ( !rc )
			break;
	}
	// Changing the state
	*state_p = newstate;
#ifdef PARANOID
	pthread_kill ( pthread_parent, SIGUSR_BLOPINT );
#endif
	// Unlocking mutexes
	debug ( 4, "pthread_cond_broadcast(). New state is %i.", *state_p );
	pthread_cond_broadcast ( pthread_cond_state );
	debug ( 4, "pthread_mutex_unlock( pthread_mutex_state )" );
	pthread_mutex_unlock ( pthread_mutex_state );
	debug ( 4, "pthread_mutex_unlock( pthread_mutex_select )" );
	pthread_mutex_unlock ( pthread_mutex_select );
#ifdef THREADING_SUPPORT
	return thread_info_unlock ( 0 );
#else
	return 0;
#endif
l_sync_parent_interrupt_end:
	// No mutexes yet: set the state and poke the parent out of blocking calls
	*state_p = newstate;
	pthread_kill ( pthread_parent, SIGUSR_BLOPINT );
#ifdef THREADING_SUPPORT
	return thread_info_unlock ( 0 );
#else
	return 0;
#endif
}
/* === DUMP === */
// Indices into sync_dump_arg.dirfd[]: one fd per dump (sub)directory
enum dump_dirfd_obj {
	DUMP_DIRFD_ROOT = 0,	// the dump root directory itself
	DUMP_DIRFD_QUEUE,	// the "queue" subdirectory
	DUMP_DIRFD_THREAD,	// the "threads" subdirectory
	DUMP_DIRFD_MAX
};
// Kind of list being dumped; selects the line prefix in sync_dump_liststep()
enum dump_ltype {
	DUMP_LTYPE_INCLUDE,	// included path ("+1")
	DUMP_LTYPE_EXCLUDE,	// excluded path ("-1")
	DUMP_LTYPE_EVINFO,	// queued event ("+1", "+*" or "+/")
};
// Shared state threaded through the g_hash_table_foreach() dump callbacks
struct sync_dump_arg {
	ctx_t *ctx_p;
	int dirfd[DUMP_DIRFD_MAX];	// directory fds (0 == never opened)
	int fd_out;			// file currently being written to
	int data;			// an "enum dump_ltype" value
};
  2990. void sync_dump_liststep ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
  2991. {
  2992. char *fpath = ( char * ) fpath_gp;
  2993. eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
  2994. struct sync_dump_arg *arg = arg_gp;
  2995. char act, num;
  2996. if ( fpath == NULL || evinfo == NULL )
  2997. return;
  2998. switch ( arg->data ) {
  2999. case DUMP_LTYPE_INCLUDE:
  3000. act = '+';
  3001. num = '1';
  3002. break;
  3003. case DUMP_LTYPE_EXCLUDE:
  3004. act = '-';
  3005. num = '1';
  3006. break;
  3007. case DUMP_LTYPE_EVINFO:
  3008. act = '+';
  3009. num = evinfo->flags & EVIF_RECURSIVELY ? '*' :
  3010. ( evinfo->flags & EVIF_CONTENTRECURSIVELY ? '/' : '1' );
  3011. break;
  3012. default:
  3013. act = '?';
  3014. num = '?';
  3015. }
  3016. dprintf ( arg->fd_out, "%c%c\t%s\n", act, num, fpath );
  3017. return;
  3018. }
  3019. int sync_dump_thread ( threadinfo_t *threadinfo_p, void *_arg )
  3020. {
  3021. struct sync_dump_arg *arg = _arg;
  3022. char buf[BUFSIZ];
  3023. snprintf ( buf, BUFSIZ, "%u-%u-%lx", threadinfo_p->iteration, threadinfo_p->thread_num, ( long ) threadinfo_p->pthread );
  3024. arg->fd_out = openat ( arg->dirfd[DUMP_DIRFD_THREAD], buf, O_WRONLY | O_CREAT, DUMP_FILEMODE );
  3025. if ( arg->fd_out == -1 )
  3026. return errno;
  3027. {
  3028. char **argv;
  3029. dprintf ( arg->fd_out,
  3030. "thread:\n\titeration == %u\n\tnum == %u\n\tpthread == %lx\n\tstarttime == %lu\n\texpiretime == %lu\n\tchild_pid == %u\n\ttry_n == %u\nCommand:",
  3031. threadinfo_p->iteration,
  3032. threadinfo_p->thread_num,
  3033. ( long ) threadinfo_p->pthread,
  3034. threadinfo_p->starttime,
  3035. threadinfo_p->expiretime,
  3036. threadinfo_p->child_pid,
  3037. threadinfo_p->try_n
  3038. );
  3039. argv = threadinfo_p->argv;
  3040. while ( *argv != NULL )
  3041. dprintf ( arg->fd_out, " \"%s\"", * ( argv++ ) );
  3042. dprintf ( arg->fd_out, "\n" );
  3043. }
  3044. arg->data = DUMP_LTYPE_EVINFO;
  3045. g_hash_table_foreach ( threadinfo_p->fpath2ei_ht, sync_dump_liststep, arg );
  3046. close ( arg->fd_out );
  3047. return 0;
  3048. }
  3049. int sync_dump ( ctx_t *ctx_p, const char *const dir_path )
  3050. {
  3051. indexes_t *indexes_p = ctx_p->indexes_p;
  3052. int rootfd, fd_out;
  3053. struct sync_dump_arg arg = {0};
  3054. enum dump_dirfd_obj dirfd_obj;
  3055. arg.ctx_p = ctx_p;
  3056. debug ( 3, "%s", dir_path );
  3057. if ( dir_path == NULL )
  3058. return EINVAL;
  3059. static const char *const subdirs[] = {
  3060. [DUMP_DIRFD_QUEUE] = "queue",
  3061. [DUMP_DIRFD_THREAD] = "threads"
  3062. };
  3063. errno = 0;
  3064. rootfd = mkdirat_open ( dir_path, AT_FDCWD, DUMP_DIRMODE );
  3065. if ( rootfd == -1 ) {
  3066. error ( "Cannot open directory \"%s\"", dir_path );
  3067. goto l_sync_dump_end;
  3068. }
  3069. fd_out = openat ( rootfd, "instance", O_WRONLY | O_CREAT, DUMP_FILEMODE );
  3070. if ( fd_out == -1 ) {
  3071. error ( "Cannot open file \"%s\" for writing" );
  3072. goto l_sync_dump_end;
  3073. }
  3074. dprintf ( fd_out, "status == %s\n", getenv ( "CLSYNC_STATUS" ) ); // TODO: remove getenv() from here
  3075. arg.fd_out = fd_out;
  3076. arg.data = DUMP_LTYPE_EVINFO;
  3077. if ( indexes_p->nonthreaded_syncing_fpath2ei_ht != NULL )
  3078. g_hash_table_foreach ( indexes_p->nonthreaded_syncing_fpath2ei_ht, sync_dump_liststep, &arg );
  3079. close ( fd_out );
  3080. arg.dirfd[DUMP_DIRFD_ROOT] = rootfd;
  3081. dirfd_obj = DUMP_DIRFD_ROOT + 1;
  3082. while ( dirfd_obj < DUMP_DIRFD_MAX ) {
  3083. const char *const subdir = subdirs[dirfd_obj];
  3084. arg.dirfd[dirfd_obj] = mkdirat_open ( subdir, rootfd, DUMP_DIRMODE );
  3085. if ( arg.dirfd[dirfd_obj] == -1 ) {
  3086. error ( "Cannot open directory \"%s\"", subdir );
  3087. goto l_sync_dump_end;
  3088. }
  3089. dirfd_obj++;
  3090. }
  3091. int queue_id = 0;
  3092. while ( queue_id < QUEUE_MAX ) {
  3093. char buf[BUFSIZ];
  3094. snprintf ( buf, BUFSIZ, "%u", queue_id );
  3095. arg.fd_out = openat ( arg.dirfd[DUMP_DIRFD_QUEUE], buf, O_WRONLY | O_CREAT, DUMP_FILEMODE );
  3096. arg.data = DUMP_LTYPE_EVINFO;
  3097. g_hash_table_foreach ( indexes_p->fpath2ei_coll_ht[queue_id], sync_dump_liststep, &arg );
  3098. if ( indexes_p->exc_fpath_coll_ht[queue_id] != NULL ) {
  3099. arg.data = DUMP_LTYPE_EXCLUDE;
  3100. g_hash_table_foreach ( indexes_p->exc_fpath_coll_ht[queue_id], sync_dump_liststep, &arg );
  3101. }
  3102. close ( arg.fd_out );
  3103. queue_id++;
  3104. }
  3105. #ifdef THREADING_SUPPORT
  3106. threads_foreach ( sync_dump_thread, STATE_RUNNING, &arg );
  3107. #endif
  3108. l_sync_dump_end:
  3109. dirfd_obj = DUMP_DIRFD_ROOT;
  3110. while ( dirfd_obj < DUMP_DIRFD_MAX ) {
  3111. if ( arg.dirfd[dirfd_obj] != -1 && arg.dirfd[dirfd_obj] != 0 )
  3112. close ( arg.dirfd[dirfd_obj] );
  3113. dirfd_obj++;
  3114. }
  3115. if ( errno )
  3116. error ( "Cannot create the dump to \"%s\"", dir_path );
  3117. return errno;
  3118. }
  3119. /* === /DUMP === */
  3120. void sync_sigchld()
  3121. {
  3122. debug ( 9, "" );
  3123. privileged_check();
  3124. return;
  3125. }
// Exported so sync_term() can set the program exit code before signalling
int *sync_sighandler_exitcode_p = NULL;
/*
 * Body of the signal-handler thread (spawned by sync_run()): the interesting
 * signals are blocked in all other threads and received here via sigwait(),
 * then translated into main-loop state switches via sync_switch_state().
 * Returns 0 once the program has reached TERM/EXIT state.
 */
int sync_sighandler ( sighandler_arg_t *sighandler_arg_p )
{
	int signal = 0, ret;
	sigset_t sigset_full;
	ctx_t *ctx_p = sighandler_arg_p->ctx_p;
	// indexes_t *indexes_p = sighandler_arg_p->indexes_p;
	pthread_t pthread_parent = sighandler_arg_p->pthread_parent;
	// sigset_t *sigset_p = sighandler_arg_p->sigset_p;
	int *exitcode_p = sighandler_arg_p->exitcode_p;
	sync_sighandler_exitcode_p = exitcode_p;
	sethandler_sigchld ( sync_sigchld );
	sigfillset ( &sigset_full );
	while ( state_p == NULL || ( ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) ) {
		debug ( 3, "waiting for signal (is sigset filled == %i)", sigismember ( &sigset_full, SIGTERM ) );
		ret = sigwait ( &sigset_full, &signal );
		if ( state_p == NULL ) {
			// The main loop has not started yet: only termination signals matter
			switch ( signal ) {
				case SIGALRM:
					*exitcode_p = ETIME;
					// fallthrough
				case SIGQUIT:
				case SIGTERM:
				case SIGINT:
				case SIGCHLD:
					// TODO: remove the exit() from here. Main thread should exit itself
					exit ( *exitcode_p );
					break;
				default:
					warning ( "Got signal %i, but the main loop is not started, yet. Ignoring the signal.", signal );
					break;
			}
			continue;
		}
		debug ( 3, "got signal %i. ctx_p->state == %i.", signal, ctx_p->state );
		if ( ret ) {
			// TODO: handle an error here
		}
		// User-defined per-signal config blocks take precedence
		if ( ctx_p->customsignal[signal] != NULL ) {
			if ( config_block_parse ( ctx_p, ctx_p->customsignal[signal] ) ) {
				*exitcode_p = errno;
				// NOTE(review): this assignment is dead — the "continue" below
				// skips the switch, so SIGTERM handling never runs; verify intent.
				signal = SIGTERM;
			}
			continue;
		}
		switch ( signal ) {
			case SIGALRM:
				*exitcode_p = ETIME;
				// fallthrough: a timeout exits through the PREEXIT path
			case SIGQUIT:
				sync_switch_state ( ctx_p, pthread_parent, STATE_PREEXIT );
				break;
			case SIGTERM:
			case SIGINT:
				sync_switch_state ( ctx_p, pthread_parent, STATE_TERM );
				// bugfix of https://github.com/clsync/clsync/issues/44
				while ( ctx_p->children ) { // Killing children if non-pthread mode or/and (mode=="so" or mode=="rsyncso")
					pid_t child_pid = ctx_p->child_pid[--ctx_p->children];
					// Escalating: received signal, then SIGQUIT, SIGTERM, SIGKILL;
					// ENOENT means the child is already gone
					if ( privileged_kill_child ( child_pid, signal, 0 ) == ENOENT )
						continue;
					if ( signal != SIGQUIT )
						if ( privileged_kill_child ( child_pid, SIGQUIT, 0 ) == ENOENT )
							continue;
					if ( signal != SIGTERM )
						if ( privileged_kill_child ( child_pid, SIGTERM, 0 ) == ENOENT )
							continue;
					if ( privileged_kill_child ( child_pid, SIGKILL, 0 ) == ENOENT )
						continue;
				}
				break;
			case SIGHUP:
				sync_switch_state ( ctx_p, pthread_parent, STATE_REHASH );
				break;
			case SIGCHLD:
				sync_sigchld();
				break;
			case SIGUSR_THREAD_GC:
				sync_switch_state ( ctx_p, pthread_parent, STATE_THREAD_GC );
				break;
			case SIGUSR_INITSYNC:
				sync_switch_state ( ctx_p, pthread_parent, STATE_INITSYNC );
				break;
			case SIGUSR_DUMP:
				sync_dump ( ctx_p, ctx_p->dump_path );
				break;
			default:
				error ( "Unknown signal: %i. Exit.", signal );
				sync_switch_state ( ctx_p, pthread_parent, STATE_TERM );
				break;
		}
	}
	debug ( 3, "signal handler closed." );
	return 0;
}
  3218. int sync_term ( int exitcode )
  3219. {
  3220. *sync_sighandler_exitcode_p = exitcode;
  3221. return pthread_kill ( pthread_sighandler, SIGTERM );
  3222. }
/*
 * The entry point of the synchronization machinery: sets up the signal
 * handler thread, the event hash tables, the optional "so"/"rsyncso"
 * handler module, the privileged helper and the FS monitor subsystem,
 * runs sync_loop(), then tears everything down in reverse order.
 * Returns 0 on success or an error code.
 */
__extension__ int sync_run ( ctx_t *ctx_p )
{
	int ret;
	sighandler_arg_t sighandler_arg = {0};
	indexes_t indexes = {NULL};
	debug ( 9, "Creating signal handler thread" );
	{
		int i;
		register_blockthread();
		// Block all handler-owned signals in this (and inherited) threads so
		// they are delivered only via sigwait() in sync_sighandler()
		sigset_t sigset_sighandler;
		sigemptyset ( &sigset_sighandler );
		sigaddset ( &sigset_sighandler, SIGALRM );
		sigaddset ( &sigset_sighandler, SIGHUP );
		sigaddset ( &sigset_sighandler, SIGQUIT );
		sigaddset ( &sigset_sighandler, SIGTERM );
		sigaddset ( &sigset_sighandler, SIGINT );
		sigaddset ( &sigset_sighandler, SIGCHLD );
		sigaddset ( &sigset_sighandler, SIGUSR_THREAD_GC );
		sigaddset ( &sigset_sighandler, SIGUSR_INITSYNC );
		sigaddset ( &sigset_sighandler, SIGUSR_DUMP );
		// Also block any user-configured custom signals
		i = 0;
		while ( i < MAXSIGNALNUM ) {
			if ( ctx_p->customsignal[i] != NULL )
				sigaddset ( &sigset_sighandler, i );
			i++;
		}
		ret = pthread_sigmask ( SIG_BLOCK, &sigset_sighandler, NULL );
		if ( ret ) return ret;
		sighandler_arg.ctx_p = ctx_p;
		sighandler_arg.pthread_parent = pthread_self();
		sighandler_arg.exitcode_p = &ret;
		sighandler_arg.sigset_p = &sigset_sighandler;
		ret = pthread_create ( &pthread_sighandler, NULL, ( void * ( * ) ( void * ) ) sync_sighandler, &sighandler_arg );
		if ( ret ) return ret;
		// Keep SIGUSR_BLOPINT deliverable here: it interrupts blocking calls
		sigset_t sigset_parent;
		sigemptyset ( &sigset_parent );
		sigaddset ( &sigset_parent, SIGUSR_BLOPINT );
		ret = pthread_sigmask ( SIG_UNBLOCK, &sigset_parent, NULL );
		if ( ret ) return ret;
		signal ( SIGUSR_BLOPINT, sync_sig_int );
	}
	debug ( 9, "Creating hash tables" );
	{
		int i;
		ctx_p->indexes_p = &indexes;
		indexes.wd2fpath_ht = g_hash_table_new_full ( g_direct_hash, g_direct_equal, 0, 0 );
		indexes.fpath2wd_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
		indexes.fpath2ei_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
		indexes.exc_fpath_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
		indexes.out_lines_aggr_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
		indexes.fileinfo_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
		i = 0;
		while ( i < QUEUE_MAX ) {
			switch ( i ) {
				case QUEUE_LOCKWAIT:
					indexes.fpath2ei_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
					break;
				default:
					indexes.fpath2ei_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
					indexes.exc_fpath_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
			}
			i++;
		}
	}
	debug ( 9, "Loading dynamical libraries" );
	if ( ctx_p->flags[MODE] == MODE_SO || ctx_p->flags[MODE] == MODE_RSYNCSO ) {
		/* security checks before dlopen */
		struct stat so_stat;
		if ( stat ( ctx_p->handlerfpath, &so_stat ) == -1 ) {
			error ( "Can't stat shared object file \"%s\": %s", ctx_p->handlerfpath, strerror ( errno ) );
			return errno;
		}
		// allow normal files only (stat will follow symlinks)
		if ( !S_ISREG ( so_stat.st_mode ) ) {
			// NOTE(review): format has one "%s" but two arguments are passed;
			// the extra st_uid is ignored — harmless but confusing. Verify.
			error ( "Shared object \"%s\" must be a regular file (or symlink to a regular file).",
			        ctx_p->handlerfpath, so_stat.st_uid );
			return EPERM;
		}
		// allowed owners are: root and real uid (who started clsync prior to setuid)
		if ( so_stat.st_uid && so_stat.st_uid != getuid() ) {
			/* check for rare case when clsync binary owner is neither root nor current uid */
			struct stat cl_stat;
			// NOTE(review): 20 bytes can truncate "/proc/<pid>/exe" for
			// 10-digit pids (needs 21 incl. NUL) — verify against pid_max.
			char *cl_str = alloca ( 20 ); // allocate for "/proc/PID/exe"
			int ret;
			snprintf ( cl_str, 20, "/proc/%i/exe", getpid() );
			// stat clsync binary itself to get its owner's uid
			if ( ( ret = stat ( cl_str, &cl_stat ) ) == -1 ) {
				error ( "Can't stat clsync binary file \"%s\": %s", cl_str, strerror ( errno ) );
			}
			if ( ret == -1 || so_stat.st_uid != cl_stat.st_uid ) {
				error ( "Wrong owner for shared object \"%s\": %i. "
				        "Only root, clsync file owner and user started the program are allowed.",
				        ctx_p->handlerfpath, so_stat.st_uid );
				return EPERM;
			}
		}
		// do not allow special bits and g+w,o+w
		if ( so_stat.st_mode & ( S_ISUID | S_ISGID | S_ISVTX | S_IWGRP | S_IWOTH ) ) {
			error ( "Wrong shared object \"%s\" permissions: %#lo"
			        "Special bits, group and world writable are not allowed.",
			        ctx_p->handlerfpath, so_stat.st_mode & 07777 );
			return EPERM;
		}
		// dlopen()
		void *synchandler_handle = dlopen ( ctx_p->handlerfpath, RTLD_NOW | RTLD_LOCAL );
		if ( synchandler_handle == NULL ) {
			error ( "Cannot load shared object file \"%s\": %s", ctx_p->handlerfpath, dlerror() );
			return -1;
		}
		// resolving init, sync and deinit functions' handlers
		ctx_p->handler_handle = synchandler_handle;
		ctx_p->handler_funct.init = ( api_funct_init ) dlsym ( ctx_p->handler_handle, API_PREFIX"init" );
		if ( ctx_p->flags[MODE] == MODE_RSYNCSO ) {
			ctx_p->handler_funct.rsync = ( api_funct_rsync ) dlsym ( ctx_p->handler_handle, API_PREFIX"rsync" );
			if ( ctx_p->handler_funct.rsync == NULL ) {
				char *dlerror_str = dlerror();
				error ( "Cannot resolve symbol "API_PREFIX"rsync in shared object \"%s\": %s",
				        ctx_p->handlerfpath, dlerror_str != NULL ? dlerror_str : "No error description returned." );
			}
		} else {
			ctx_p->handler_funct.sync = ( api_funct_sync ) dlsym ( ctx_p->handler_handle, API_PREFIX"sync" );
			if ( ctx_p->handler_funct.sync == NULL ) {
				char *dlerror_str = dlerror();
				error ( "Cannot resolve symbol "API_PREFIX"sync in shared object \"%s\": %s",
				        ctx_p->handlerfpath, dlerror_str != NULL ? dlerror_str : "No error description returned." );
			}
		}
		ctx_p->handler_funct.deinit = ( api_funct_deinit ) dlsym ( ctx_p->handler_handle, API_PREFIX"deinit" );
		// running init function
		if ( ctx_p->handler_funct.init != NULL )
			if ( ( ret = ctx_p->handler_funct.init ( ctx_p, &indexes ) ) ) {
				error ( "Cannot init sync-handler module." );
				return ret;
			}
	}
	// Initializing rand-generator if it's required
	if ( ctx_p->listoutdir )
		srand ( time ( NULL ) );
	if ( !ctx_p->flags[ONLYINITSYNC] ) {
		debug ( 9, "Initializing FS monitor kernel subsystem in this userspace application" );
		if ( sync_notify_init ( ctx_p ) )
			return errno;
	}
	if ( ( ret = privileged_init ( ctx_p ) ) )
		return ret;
	{
		// Preparing monitor subsystem context function pointers
		switch ( ctx_p->flags[MONITOR] ) {
#ifdef INOTIFY_SUPPORT
			case NE_INOTIFY:
				ctx_p->notifyenginefunct.add_watch_dir = inotify_add_watch_dir;
				ctx_p->notifyenginefunct.wait = inotify_wait;
				ctx_p->notifyenginefunct.handle = inotify_handle;
				break;
#endif
#ifdef KQUEUE_SUPPORT
			case NE_KQUEUE:
				ctx_p->notifyenginefunct.add_watch_dir = kqueue_add_watch_dir;
				ctx_p->notifyenginefunct.wait = kqueue_wait;
				ctx_p->notifyenginefunct.handle = kqueue_handle;
				break;
#endif
#ifdef BSM_SUPPORT
			case NE_BSM:
			case NE_BSM_PREFETCH:
				ctx_p->notifyenginefunct.add_watch_dir = bsm_add_watch_dir;
				ctx_p->notifyenginefunct.wait = bsm_wait;
				ctx_p->notifyenginefunct.handle = bsm_handle;
				break;
#endif
#ifdef GIO_SUPPORT
			case NE_GIO:
				ctx_p->notifyenginefunct.add_watch_dir = gio_add_watch_dir;
				ctx_p->notifyenginefunct.wait = gio_wait;
				ctx_p->notifyenginefunct.handle = gio_handle;
				break;
#endif
#ifdef DTRACEPIPE_SUPPORT
			case NE_DTRACEPIPE:
				ctx_p->notifyenginefunct.add_watch_dir = dtracepipe_add_watch_dir;
				ctx_p->notifyenginefunct.wait = dtracepipe_wait;
				ctx_p->notifyenginefunct.handle = dtracepipe_handle;
				break;
#endif
#ifdef VERYPARANOID
			default:
				critical ( "Unknown FS monitor subsystem: %i", ctx_p->flags[MONITOR] );
#endif
		}
	}
#ifdef CLUSTER_SUPPORT
	// Initializing cluster subsystem
	if ( ctx_p->cluster_iface != NULL ) {
		ret = cluster_init ( ctx_p, &indexes );
		if ( ret ) {
			error ( "Cannot initialize cluster subsystem." );
			cluster_deinit();
			return ret;
		}
	}
#endif
#ifdef ENABLE_SOCKET
	// Creating control socket
	if ( ctx_p->socketpath != NULL )
		ret = control_run ( ctx_p );
#endif
	if ( !ctx_p->flags[ONLYINITSYNC] ) {
		// Marking file tree for FS monitor
		debug ( 30, "Running recursive notify marking function" );
		ret = sync_mark_walk ( ctx_p, ctx_p->watchdir, &indexes );
		if ( ret ) return ret;
	}
	// "Infinite" loop of processling the events
	ret = sync_loop ( ctx_p, &indexes );
	if ( ret ) return ret;
	debug ( 1, "sync_loop() ended" );
#ifdef ENABLE_SOCKET
	// Removing control socket
	if ( ctx_p->socketpath != NULL )
		control_cleanup ( ctx_p );
#endif
	debug ( 1, "killing sighandler" );
	// TODO: Do cleanup of watching points
	pthread_kill ( pthread_sighandler, SIGINT );
#ifdef VALGRIND
	pthread_join ( pthread_sighandler, NULL ); // TODO: fix a deadlock
#endif
	// Killing children
#ifdef THREADING_SUPPORT
	thread_cleanup ( ctx_p );
#endif
	debug ( 2, "Deinitializing the FS monitor subsystem" );
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY:
			inotify_deinit ( ctx_p );
			break;
#endif
#ifdef KQUEUE_SUPPORT
		case NE_KQUEUE:
			kqueue_deinit ( ctx_p );
			break;
#endif
#ifdef BSM_SUPPORT
		case NE_BSM:
		case NE_BSM_PREFETCH:
			bsm_deinit ( ctx_p );
			break;
#endif
#ifdef GIO_SUPPORT
		case NE_GIO:
			gio_deinit ( ctx_p );
			break;
#endif
#ifdef DTRACEPIPE_SUPPORT
		case NE_DTRACEPIPE:
			dtracepipe_deinit ( ctx_p );
			break;
#endif
	}
	// Closing shared libraries
	if ( ctx_p->flags[MODE] == MODE_SO ) {
		int _ret;
		if ( ctx_p->handler_funct.deinit != NULL )
			if ( ( _ret = ctx_p->handler_funct.deinit() ) ) {
				error ( "Cannot deinit sync-handler module." );
				// Preserve the first error code
				if ( !ret ) ret = _ret;
			}
		if ( dlclose ( ctx_p->handler_handle ) ) {
			error ( "Cannot unload shared object file \"%s\": %s",
			        ctx_p->handlerfpath, dlerror() );
			if ( !ret ) ret = -1;
		}
	}
	// Cleaning up run-time routines
	rsync_escape_cleanup();
	// Removing hash-tables
	{
		int i;
		debug ( 3, "Closing hash tables" );
		g_hash_table_destroy ( indexes.wd2fpath_ht );
		g_hash_table_destroy ( indexes.fpath2wd_ht );
		g_hash_table_destroy ( indexes.fpath2ei_ht );
		g_hash_table_destroy ( indexes.exc_fpath_ht );
		g_hash_table_destroy ( indexes.out_lines_aggr_ht );
		g_hash_table_destroy ( indexes.fileinfo_ht );
		i = 0;
		while ( i < QUEUE_MAX ) {
			switch ( i ) {
				case QUEUE_LOCKWAIT:
					g_hash_table_destroy ( indexes.fpath2ei_coll_ht[i] );
					break;
				default:
					g_hash_table_destroy ( indexes.fpath2ei_coll_ht[i] );
					g_hash_table_destroy ( indexes.exc_fpath_coll_ht[i] );
			}
			i++;
		}
	}
	// Deinitializing cluster subsystem
#ifdef CLUSTER_SUPPORT
	debug ( 3, "Deinitializing cluster subsystem" );
	if ( ctx_p->cluster_iface != NULL ) {
		int _ret;
		_ret = cluster_deinit();
		if ( _ret ) {
			// NOTE(review): extra arguments without conversion specifiers in
			// the format string — probably meant "%s (%i)"; verify.
			error ( "Cannot deinitialize cluster subsystem.", strerror ( _ret ), _ret );
			ret = _ret;
		}
	}
#endif
	// One second for another threads
#ifdef VERYPARANOID
	debug ( 9, "sleep("TOSTR ( SLEEP_SECONDS ) ")" );
	sleep ( SLEEP_SECONDS );
#endif
	// Run the exit hook ("--exit-hook") with the label as its argument
	if ( ctx_p->flags[EXITHOOK] ) {
		char *argv[] = { ctx_p->exithookfile, ctx_p->label, NULL};
		exec_argv ( argv, NULL );
	}
	// Cleaning up cgroups staff
#ifdef CGROUP_SUPPORT
	debug ( 3, "Cleaning up cgroups staff" );
	if ( ctx_p->flags[FORBIDDEVICES] )
		error_on ( privileged_clsync_cgroup_deinit ( ctx_p ) );
#endif
	debug ( 3, "privileged_deinit()" );
	// Combine the helper's exit status into the overall result
	ret |= privileged_deinit ( ctx_p );
	debug ( 3, "finish" );
	return ret;
}