/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/un.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "nscdcli.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"

#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/nscd.conf"
#endif
#define DEFAULT_CONFIG_PATH	"nscd.conf"

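/*
 * Maximum amount of data transferred over the client socket in a single
 * I/O operation; larger buffers are split into chunks of this size
 * (see process_socket_event()).
 */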
#define MAX_SOCKET_IO_SIZE	4096

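/*
 * Arguments handed to each processing thread: the shared cache, the parsed
 * configuration and the runtime environment (socket and kqueue).
 */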
struct processing_thread_args {
	cache	the_cache;
	struct configuration	*the_configuration;
	struct runtime_env		*the_runtime_env;
};

static void accept_connection(struct kevent *, struct runtime_env *,
	struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void processing_loop(cache, struct runtime_env *,
	struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
	struct configuration *);
static void *processing_thread(void *);
static void usage(void) __dead2;

void get_time_func(struct timeval *);

static void
usage(void)
{
	fprintf(stderr,
	    "usage: nscd [-dnst] [-i cachename] [-I cachename]\n");
	exit(1);
}

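/*
 * Creates the cache and registers a positive and a negative cache entry for
 * every configuration entry.  Multipart entries are registered later, during
 * query processing.
 */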
static cache
init_cache_(struct configuration *config)
{
	struct cache_params params;
	cache retval;

	struct configuration_entry *config_entry;
	size_t	size, i;

	TRACE_IN(init_cache_);

	memset(&params, 0, sizeof(struct cache_params));
	params.get_time_func = get_time_func;
	retval = init_cache(&params);

	size = configuration_get_entries_size(config);
	for (i = 0; i < size; ++i) {
		config_entry = configuration_get_entry(config, i);
		/*
		 * We register the common entries now - multipart entries
		 * will be registered automatically during queries.
		 */
		register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->positive_cache_params);
		config_entry->positive_cache_entry = find_cache_entry(retval,
			config_entry->positive_cache_params.cep.entry_name);
		assert(config_entry->positive_cache_entry !=
			INVALID_CACHE_ENTRY);

		register_cache_entry(retval, (struct cache_entry_params *)
			&config_entry->negative_cache_params);
		config_entry->negative_cache_entry = find_cache_entry(retval,
			config_entry->negative_cache_params.cep.entry_name);
		assert(config_entry->negative_cache_entry !=
			INVALID_CACHE_ENTRY);
	}

	LOG_MSG_2("cache", "cache was successfully initialized");
	TRACE_OUT(init_cache_);
	return (retval);
}

static void
destroy_cache_(cache the_cache)
{
	TRACE_IN(destroy_cache_);
	destroy_cache(the_cache);
	TRACE_OUT(destroy_cache_);
}

/*
 * The socket and the kqueue are prepared here.  We use one global queue for
 * both socket and timer events.
 */
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
	int serv_addr_len;
	struct sockaddr_un serv_addr;

	struct kevent eventlist;
	struct timespec timeout;

	struct runtime_env *retval;

	TRACE_IN(init_runtime_env);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);

	if (config->force_unlink == 1)
		unlink(config->socket_path);

	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
	serv_addr.sun_family = PF_LOCAL;
	strlcpy(serv_addr.sun_path, config->socket_path,
		sizeof(serv_addr.sun_path));
	serv_addr_len = sizeof(serv_addr.sun_family) +
		strlen(serv_addr.sun_path) + 1;

	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
		serv_addr_len) == -1) {
		close(retval->sockfd);
		free(retval);

		LOG_ERR_2("runtime environment", "can't bind socket to path: "
			"%s", config->socket_path);
		TRACE_OUT(init_runtime_env);
		return (NULL);
	}
	LOG_MSG_2("runtime environment", "using socket %s",
		config->socket_path);

	/*
	 * Here we mark the socket as non-blocking and set its backlog to
	 * the maximum value.
	 */
	chmod(config->socket_path, config->socket_mode);
	listen(retval->sockfd, -1);
	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);

	retval->queue = kqueue();
	assert(retval->queue != -1);

	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		0, 0, 0);
	memset(&timeout, 0, sizeof(struct timespec));
	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);

	LOG_MSG_2("runtime environment", "successfully initialized");
	TRACE_OUT(init_runtime_env);
	return (retval);
}

static void
destroy_runtime_env(struct runtime_env *env)
{
	TRACE_IN(destroy_runtime_env);
	close(env->queue);
	close(env->sockfd);
	free(env);
	TRACE_OUT(destroy_runtime_env);
}

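/*
 * Accepts a pending connection on the listening socket, verifies the peer's
 * credentials with getpeereid(), allocates a query_state for it and registers
 * one-shot read and timer events for the new descriptor.
 */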
static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timespec	timeout;
	struct query_state	*qstate;

	int	fd;
	int	res;

	uid_t	euid;
	gid_t	egid;

	TRACE_IN(accept_connection);
	fd = accept(event_data->ident, NULL, NULL);
	if (fd == -1) {
		LOG_ERR_2("accept_connection", "error %d during accept()",
		    errno);
		TRACE_OUT(accept_connection);
		return;
	}

	if (getpeereid(fd, &euid, &egid) != 0) {
		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
			errno);
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

	qstate = init_query_state(fd, sizeof(int), euid, egid);
	if (qstate == NULL) {
		LOG_ERR_2("accept_connection", "can't init query_state");
		close(fd);
		TRACE_OUT(accept_connection);
		return;
	}

	memset(&timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
		0, qstate->timeout.tv_sec * 1000, qstate);
	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
		NOTE_LOWAT, qstate->kevent_watermark, qstate);
	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
	if (res < 0)
		LOG_ERR_2("accept_connection", "kevent error");

	TRACE_OUT(accept_connection);
}

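/*
 * Handles a read/write event on a client socket: removes the pending timer,
 * performs the socket I/O (splitting large transfers into MAX_SOCKET_IO_SIZE
 * chunks via the alternate I/O buffer), runs the query processing function
 * and re-registers the filter and timer events for the remaining data.
 */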
static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct kevent	eventlist[2];
	struct timeval	query_timeout;
	struct timespec	kevent_timeout;
	int	nevents;
	int	eof_res, res;
	ssize_t	io_res;
	struct query_state *qstate;

	TRACE_IN(process_socket_event);
	eof_res = event_data->flags & EV_EOF ? 1 : 0;
	res = 0;

	memset(&kevent_timeout, 0, sizeof(struct timespec));
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
		0, 0, NULL);
	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
	if (nevents == -1) {
		if (errno == ENOENT) {
			/* the timer is already handling this event */
			TRACE_OUT(process_socket_event);
			return;
		} else {
			/* some other error happened */
			LOG_ERR_2("process_socket_event", "kevent error, errno"
				" is %d", errno);
			TRACE_OUT(process_socket_event);
			return;
		}
	}
	qstate = (struct query_state *)event_data->udata;

	/*
	 * If the buffer that is to be sent/received is too large, we transfer
	 * it implicitly, using the query_io_buffer_read and
	 * query_io_buffer_write functions of the query_state.  These functions
	 * use a temporary buffer, which is later sent/received in parts.
	 * The code below implements the buffer splitting/merging for
	 * send/receive operations.  It also performs the actual socket I/O.
	 */
	if (((qstate->use_alternate_io == 0) &&
		(qstate->kevent_watermark <= (size_t)event_data->data)) ||
		((qstate->use_alternate_io != 0) &&
		(qstate->io_buffer_watermark <= (size_t)event_data->data))) {
		if (qstate->use_alternate_io != 0) {
			switch (qstate->io_buffer_filter) {
			case EVFILT_READ:
				io_res = query_socket_read(qstate,
					qstate->io_buffer_p,
					qstate->io_buffer_watermark);
				if (io_res < 0) {
					qstate->use_alternate_io = 0;
					qstate->process_func = NULL;
				} else {
					qstate->io_buffer_p += io_res;
					if (qstate->io_buffer_p ==
					    qstate->io_buffer +
					    qstate->io_buffer_size) {
						qstate->io_buffer_p =
						    qstate->io_buffer;
						qstate->use_alternate_io = 0;
					}
				}
				break;
			default:
				break;
			}
		}

		if (qstate->use_alternate_io == 0) {
			do {
				res = qstate->process_func(qstate);
			} while ((qstate->kevent_watermark == 0) &&
					(qstate->process_func != NULL) &&
					(res == 0));

			if (res != 0)
				qstate->process_func = NULL;
		}

		if ((qstate->use_alternate_io != 0) &&
			(qstate->io_buffer_filter == EVFILT_WRITE)) {
			io_res = query_socket_write(qstate, qstate->io_buffer_p,
				qstate->io_buffer_watermark);
			if (io_res < 0) {
				qstate->use_alternate_io = 0;
				qstate->process_func = NULL;
			} else
				qstate->io_buffer_p += io_res;
		}
	} else {
		/* assuming that socket was closed */
		qstate->process_func = NULL;
		qstate->use_alternate_io = 0;
	}

	if (((qstate->process_func == NULL) &&
	    (qstate->use_alternate_io == 0)) ||
		(eof_res != 0) || (res != 0)) {
		destroy_query_state(qstate);
		close(event_data->ident);
		TRACE_OUT(process_socket_event);
		return;
	}

	/* updating the query_state lifetime variable */
	get_time_func(&query_timeout);
	query_timeout.tv_usec = 0;
	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
		query_timeout.tv_sec = 0;
	else
		query_timeout.tv_sec = qstate->timeout.tv_sec -
			query_timeout.tv_sec;

	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size))
		qstate->use_alternate_io = 0;

	if (qstate->use_alternate_io == 0) {
		/*
		 * If we must send/receive a large block of data, we prepare
		 * the query_state's io_XXX fields.  We also substitute its
		 * write_func and read_func with query_io_buffer_write and
		 * query_io_buffer_read, which allow us to implicitly
		 * send/receive this large buffer later (in subsequent calls
		 * to process_socket_event).
		 */
		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
#if 0
			/*
			 * XXX: Uncommenting this code makes nscd(8) fail for
			 *      entries larger than a few kB, causing a few
			 *      seconds' worth of delay for each call that
			 *      retrieves them.
			 */
			if (qstate->io_buffer != NULL)
				free(qstate->io_buffer);

			qstate->io_buffer = calloc(1,
				qstate->kevent_watermark);
			assert(qstate->io_buffer != NULL);

			qstate->io_buffer_p = qstate->io_buffer;
			qstate->io_buffer_size = qstate->kevent_watermark;
			qstate->io_buffer_filter = qstate->kevent_filter;

			qstate->write_func = query_io_buffer_write;
			qstate->read_func = query_io_buffer_read;

			if (qstate->kevent_filter == EVFILT_READ)
				qstate->use_alternate_io = 1;
#endif

			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
				qstate->kevent_filter, EV_ADD | EV_ONESHOT,
				NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		} else {
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->kevent_filter, EV_ADD | EV_ONESHOT,
			    NOTE_LOWAT, qstate->kevent_watermark, qstate);
		}
	} else {
		if (qstate->io_buffer + qstate->io_buffer_size -
		    qstate->io_buffer_p < MAX_SOCKET_IO_SIZE) {
			qstate->io_buffer_watermark = qstate->io_buffer +
				qstate->io_buffer_size - qstate->io_buffer_p;
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->io_buffer_filter,
				EV_ADD | EV_ONESHOT, NOTE_LOWAT,
				qstate->io_buffer_watermark,
				qstate);
		} else {
			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
			EV_SET(&eventlist[1], event_data->ident,
			    qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
			    NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
		}
	}
	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
		EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);

	TRACE_OUT(process_socket_event);
}

/*
 * This routine is called when a timer event has been signaled in the kqueue.
 * It just closes the socket and destroys the query_state.
 */
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
	struct configuration *config)
{
	struct query_state	*qstate;

	TRACE_IN(process_timer_event);
	qstate = (struct query_state *)event_data->udata;
	destroy_query_state(qstate);
	close(event_data->ident);
	TRACE_OUT(process_timer_event);
}

/*
 * The processing loop is the basic processing routine that forms the body of
 * each processing thread.
 */
static void
processing_loop(cache the_cache, struct runtime_env *env,
	struct configuration *config)
{
	struct timespec timeout;
	const int eventlist_size = 1;
	struct kevent eventlist[eventlist_size];
	int nevents, i;

	TRACE_MSG("=> processing_loop");
	memset(&timeout, 0, sizeof(struct timespec));
	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);

	for (;;) {
		nevents = kevent(env->queue, NULL, 0, eventlist,
		    eventlist_size, NULL);
		/*
		 * we can only receive 1 event on success
		 */
		if (nevents == 1) {
			struct kevent *event_data;
			event_data = &eventlist[0];

			if ((int)event_data->ident == env->sockfd) {
				for (i = 0; i < event_data->data; ++i)
				    accept_connection(event_data, env, config);

				EV_SET(eventlist, s_runtime_env->sockfd,
				    EVFILT_READ, EV_ADD | EV_ONESHOT,
				    0, 0, 0);
				memset(&timeout, 0,
				    sizeof(struct timespec));
				kevent(s_runtime_env->queue, eventlist,
				    1, NULL, 0, &timeout);

			} else {
				switch (event_data->filter) {
				case EVFILT_READ:
				case EVFILT_WRITE:
					process_socket_event(event_data,
						env, config);
					break;
				case EVFILT_TIMER:
					process_timer_event(event_data,
						env, config);
					break;
				default:
					break;
				}
			}
		} else {
			/* this branch shouldn't be currently executed */
		}
	}

	TRACE_MSG("<= processing_loop");
}

/*
 * Wrapper around the processing loop function.  It sets the thread signal
 * mask to avoid SIGPIPE signals (which can happen if the client misbehaves).
 */
static void *
processing_thread(void *data)
{
	struct processing_thread_args	*args;
	sigset_t new;

	TRACE_MSG("=> processing_thread");
	args = (struct processing_thread_args *)data;

	sigemptyset(&new);
	sigaddset(&new, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
		LOG_ERR_1("processing thread",
			"thread can't block the SIGPIPE signal");

	processing_loop(args->the_cache, args->the_runtime_env,
		args->the_configuration);
	free(args);
	TRACE_MSG("<= processing_thread");

	return (NULL);
}

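/*
 * Time source used by the cache: the monotonic clock truncated to whole
 * seconds, so that cached entry lifetimes are not affected by wall-clock
 * adjustments.
 */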
void
get_time_func(struct timeval *time)
{
	struct timespec res;
	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);

	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
 * search for this symbol in the executable.  The symbol marks the executable
 * as the caching daemon itself, so if it is found, nsdispatch won't try to
 * connect to the caching daemon and will simply ignore the 'cache' source in
 * nsswitch.conf.  This avoids cycles and lets the daemon perform the lookups
 * on its own behalf.
 *
 * (not actually a function; it used to be, but it doesn't make any
 * difference, as long as it has external linkage)
 */
void *_nss_cache_cycle_prevention_function;

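/*
 * Parses the command-line options.  In interactive mode (-d, -i, -I) it acts
 * as a client of a running daemon; otherwise it daemonizes (unless -n is
 * given), registers the request agents, reads the configuration, initializes
 * the cache and the runtime environment, and runs the processing loop in one
 * or more threads.
 */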
int
main(int argc, char *argv[])
{
	struct processing_thread_args *thread_args;
	pthread_t *threads;

	struct pidfh *pidfile;
	pid_t pid;

	char const *config_file;
	char const *error_str;
	int error_line;
	int i, res;

	int trace_mode_enabled;
	int force_single_threaded;
	int do_not_daemonize;
	int clear_user_cache_entries, clear_all_cache_entries;
	char *user_config_entry_name, *global_config_entry_name;
	int show_statistics;
	int daemon_mode, interactive_mode;


	/* by default all debug messages are omitted */
	TRACE_OFF();

	/* parsing command line arguments */
	trace_mode_enabled = 0;
	force_single_threaded = 0;
	do_not_daemonize = 0;
	clear_user_cache_entries = 0;
	clear_all_cache_entries = 0;
	show_statistics = 0;
	user_config_entry_name = NULL;
	global_config_entry_name = NULL;
	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
		switch (res) {
		case 'n':
			do_not_daemonize = 1;
			break;
		case 's':
			force_single_threaded = 1;
			break;
		case 't':
			trace_mode_enabled = 1;
			break;
		case 'i':
			clear_user_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					user_config_entry_name = strdup(optarg);
			break;
		case 'I':
			clear_all_cache_entries = 1;
			if (optarg != NULL)
				if (strcmp(optarg, "all") != 0)
					global_config_entry_name =
						strdup(optarg);
			break;
		case 'd':
			show_statistics = 1;
			break;
		case '?':
		default:
			usage();
			/* NOT REACHED */
		}
	}

	daemon_mode = do_not_daemonize | force_single_threaded |
		trace_mode_enabled;
	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
		show_statistics;

	if ((daemon_mode != 0) && (interactive_mode != 0)) {
		LOG_ERR_1("main", "daemon-mode and interactive-mode arguments "
			"can't be used together");
		usage();
	}

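	/*
	 * Interactive mode: read the running daemon's pid file, connect to
	 * its socket and request the cache transformations given on the
	 * command line.
	 */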
	if (interactive_mode != 0) {
		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
		char pidbuf[256];

		struct nscd_connection_params connection_params;
		nscd_connection connection;

		int result;

		if (pidfin == NULL)
			errx(EXIT_FAILURE, "There is no daemon running.");

		memset(pidbuf, 0, sizeof(pidbuf));
		fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
		if (ferror(pidfin) != 0) {
			fclose(pidfin);
			errx(EXIT_FAILURE, "Can't read from pidfile.");
		}
		fclose(pidfin);

		if (sscanf(pidbuf, "%d", &pid) != 1)
			errx(EXIT_FAILURE, "Invalid pidfile.");
		LOG_MSG_1("main", "daemon PID is %d", pid);


		memset(&connection_params, 0,
			sizeof(struct nscd_connection_params));
		connection_params.socket_path = DEFAULT_SOCKET_PATH;
		connection = open_nscd_connection__(&connection_params);
		if (connection == INVALID_NSCD_CONNECTION)
			errx(EXIT_FAILURE, "Can't connect to the daemon.");

		if (clear_user_cache_entries != 0) {
			result = nscd_transform__(connection,
				user_config_entry_name, TT_USER);
			if (result != 0)
				LOG_MSG_1("main",
					"user cache transformation failed");
			else
				LOG_MSG_1("main",
					"user cache transformation "
					"succeeded");
		}

		if (clear_all_cache_entries != 0) {
			if (geteuid() != 0)
				errx(EXIT_FAILURE, "Only root can initiate "
					"global cache transformation.");

			result = nscd_transform__(connection,
				global_config_entry_name, TT_ALL);
			if (result != 0)
				LOG_MSG_1("main",
					"global cache transformation "
					"failed");
			else
				LOG_MSG_1("main",
					"global cache transformation "
					"succeeded");
		}

		close_nscd_connection__(connection);

		free(user_config_entry_name);
		free(global_config_entry_name);
		return (EXIT_SUCCESS);
	}

	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
	if (pidfile == NULL) {
		if (errno == EEXIST)
			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
				pid);
		warn("Cannot open or create pidfile");
	}

	if (trace_mode_enabled == 1)
		TRACE_ON();

	/* blocking the main thread from receiving SIGPIPE signal */
	sigblock(sigmask(SIGPIPE));

	/* daemonization */
	if (do_not_daemonize == 0) {
		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
		if (res != 0) {
			LOG_ERR_1("main", "can't daemonize myself: %s",
			    strerror(errno));
			pidfile_remove(pidfile);
			goto fin;
		} else
			LOG_MSG_1("main", "successfully daemonized");
	}

	pidfile_write(pidfile);

	s_agent_table = init_agent_table();
	register_agent(s_agent_table, init_passwd_agent());
	register_agent(s_agent_table, init_passwd_mp_agent());
	register_agent(s_agent_table, init_group_agent());
	register_agent(s_agent_table, init_group_mp_agent());
	register_agent(s_agent_table, init_services_agent());
	register_agent(s_agent_table, init_services_mp_agent());
	LOG_MSG_1("main", "request agents registered successfully");

	/*
	 * The hosts agent can't work properly until we have access to the
	 * appropriate dtab structures, which are used in nsdispatch calls.
	 *
	 register_agent(s_agent_table, init_hosts_agent());
	*/

	/* configuration initialization */
	s_configuration = init_configuration();
	fill_configuration_defaults(s_configuration);

	error_str = NULL;
	error_line = 0;
	config_file = CONFIG_PATH;

	res = parse_config_file(s_configuration, config_file, &error_str,
		&error_line);
	if ((res != 0) && (error_str == NULL)) {
		config_file = DEFAULT_CONFIG_PATH;
		res = parse_config_file(s_configuration, config_file,
			&error_str, &error_line);
	}

	if (res != 0) {
		if (error_str != NULL) {
		LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
			config_file, error_line, error_str);
		} else {
		LOG_ERR_1("main", "no configuration file found "
		    "- was looking for %s and %s",
			CONFIG_PATH, DEFAULT_CONFIG_PATH);
		}
		destroy_configuration(s_configuration);
		return (-1);
	}

	if (force_single_threaded == 1)
		s_configuration->threads_num = 1;

	/* cache initialization */
	s_cache = init_cache_(s_configuration);
	if (s_cache == NULL) {
		LOG_ERR_1("main", "can't initialize the cache");
		destroy_configuration(s_configuration);
		return (-1);
	}

	/* runtime environment initialization */
	s_runtime_env = init_runtime_env(s_configuration);
	if (s_runtime_env == NULL) {
		LOG_ERR_1("main", "can't initialize the runtime environment");
		destroy_configuration(s_configuration);
		destroy_cache_(s_cache);
		return (-1);
	}

	if (s_configuration->threads_num > 1) {
		threads = calloc(s_configuration->threads_num,
			sizeof(*threads));
		for (i = 0; i < s_configuration->threads_num; ++i) {
			thread_args = malloc(
				sizeof(*thread_args));
			thread_args->the_cache = s_cache;
			thread_args->the_runtime_env = s_runtime_env;
			thread_args->the_configuration = s_configuration;

			LOG_MSG_1("main", "thread #%d was successfully created",
				i);
			pthread_create(&threads[i], NULL, processing_thread,
				thread_args);

			thread_args = NULL;
		}

		for (i = 0; i < s_configuration->threads_num; ++i)
			pthread_join(threads[i], NULL);
	} else {
		LOG_MSG_1("main", "working in single-threaded mode");
		processing_loop(s_cache, s_runtime_env, s_configuration);
	}

fin:
	/* runtime environment destruction */
	destroy_runtime_env(s_runtime_env);

	/* cache destruction */
	destroy_cache_(s_cache);

	/* configuration destruction */
	destroy_configuration(s_configuration);

	/* agents table destruction */
	destroy_agent_table(s_agent_table);

	pidfile_remove(pidfile);
	return (EXIT_SUCCESS);
}