/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <nsswitch.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "config.h"
#include "debug.h"
#include "query.h"
#include "log.h"
#include "mp_ws_query.h"
#include "mp_rs_query.h"
#include "singletons.h"

static const char negative_data[1] = { 0 };

extern	void get_time_func(struct timeval *);

static	void clear_config_entry(struct configuration_entry *);
static	void clear_config_entry_part(struct configuration_entry *,
	const char *, size_t);

static	int on_query_startup(struct query_state *);
static	void on_query_destroy(struct query_state *);

static	int on_read_request_read1(struct query_state *);
static	int on_read_request_read2(struct query_state *);
static	int on_read_request_process(struct query_state *);
static	int on_read_response_write1(struct query_state *);
static	int on_read_response_write2(struct query_state *);

static	int on_rw_mapper(struct query_state *);

static	int on_transform_request_read1(struct query_state *);
static	int on_transform_request_read2(struct query_state *);
static	int on_transform_request_process(struct query_state *);
static	int on_transform_response_write1(struct query_state *);

static	int on_write_request_read1(struct query_state *);
static	int on_write_request_read2(struct query_state *);
static	int on_negative_write_request_process(struct query_state *);
static	int on_write_request_process(struct query_state *);
static	int on_write_response_write1(struct query_state *);
/*
 * Clears the specified configuration entry (clears the cache for positive and
 * negative entries) and also for all multipart entries.
 */
static void
clear_config_entry(struct configuration_entry *config_entry)
{
	size_t i;

	TRACE_IN(clear_config_entry);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry(
			config_entry->positive_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry(
			config_entry->negative_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
		transform_cache_entry(
			config_entry->mp_cache_entries[i],
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry);
}

/*
 * Clears the specified configuration entry by deleting only the elements
 * that are owned by the user with the specified eid_str.
 */
static void
clear_config_entry_part(struct configuration_entry *config_entry,
	const char *eid_str, size_t eid_str_length)
{
	cache_entry *start, *finish, *mp_entry;

	TRACE_IN(clear_config_entry_part);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->positive_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->negative_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	if (configuration_entry_find_mp_cache_entries(config_entry,
		eid_str, &start, &finish) == 0) {
		for (mp_entry = start; mp_entry != finish; ++mp_entry)
			transform_cache_entry(*mp_entry, CTT_CLEAR);
	}
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry_part);
}

/*
 * This function is assigned to the query_state structure on its creation.
 * Its main purpose is to receive credentials from the client.
 */
static int
on_query_startup(struct query_state *qstate)
{
	struct msghdr	cred_hdr;
	struct iovec	iov;
	struct cmsgcred *cred;
	int elem_type;

	struct {
		struct cmsghdr	hdr;
		char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cmsg;

	TRACE_IN(on_query_startup);
	assert(qstate != NULL);

	memset(&cred_hdr, 0, sizeof(struct msghdr));
	cred_hdr.msg_iov = &iov;
	cred_hdr.msg_iovlen = 1;
	cred_hdr.msg_control = (caddr_t)&cmsg;
	cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));

	memset(&iov, 0, sizeof(struct iovec));
	iov.iov_base = &elem_type;
	iov.iov_len = sizeof(int);

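	/*
	 * The request type arrives as a plain int in the data part of the
	 * message; the client's credentials are delivered alongside it in
	 * the SCM_CREDS control message.
	 */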
	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
		|| cmsg.hdr.cmsg_level != SOL_SOCKET
		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
	qstate->uid = cred->cmcred_uid;
	qstate->gid = cred->cmcred_gid;

#if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
/*
 * This check is probably a bit redundant - per-user cache is always separated
 * by the euid/egid pair
 */
	if (check_query_eids(qstate) != 0) {
#ifdef NS_STRICT_NSCD_EID_CHECKING
		TRACE_OUT(on_query_startup);
		return (-1);
#else
		if ((elem_type != CET_READ_REQUEST) &&
			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
			(elem_type != CET_WRITE_REQUEST) &&
			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
			TRACE_OUT(on_query_startup);
			return (-1);
		}
#endif
	}
#endif

	switch (elem_type) {
	case CET_WRITE_REQUEST:
		qstate->process_func = on_write_request_read1;
		break;
	case CET_READ_REQUEST:
		qstate->process_func = on_read_request_read1;
		break;
	case CET_TRANSFORM_REQUEST:
		qstate->process_func = on_transform_request_read1;
		break;
	case CET_MP_WRITE_SESSION_REQUEST:
		qstate->process_func = on_mp_write_session_request_read1;
		break;
	case CET_MP_READ_SESSION_REQUEST:
		qstate->process_func = on_mp_read_session_request_read1;
		break;
	default:
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	TRACE_OUT(on_query_startup);
	return (0);
}

/*
 * on_rw_mapper is used to process multiple read/write requests during
 * one connection session. It is never called at the beginning (on query_state
 * creation), as it does not process multipart requests and does not
 * receive credentials.
 */
static int
on_rw_mapper(struct query_state *qstate)
{
	ssize_t	result;
	int	elem_type;

	TRACE_IN(on_rw_mapper);
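	/*
	 * On the first pass only the kevent watermark is set, so that the
	 * next element type (an int) can be received; on the second pass
	 * the type is read and the matching request handler is installed.
	 */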
	if (qstate->kevent_watermark == 0) {
		qstate->kevent_watermark = sizeof(int);
	} else {
		result = qstate->read_func(qstate, &elem_type, sizeof(int));
		if (result != sizeof(int)) {
			TRACE_OUT(on_rw_mapper);
			return (-1);
		}

		switch (elem_type) {
		case CET_WRITE_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_write_request_read1;
			break;
		case CET_READ_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_read_request_read1;
			break;
		default:
			TRACE_OUT(on_rw_mapper);
			return (-1);
			break;
		}
	}
	TRACE_OUT(on_rw_mapper);
	return (0);
}

/*
 * The default query_destroy function
 */
static void
on_query_destroy(struct query_state *qstate)
{

	TRACE_IN(on_query_destroy);
	finalize_comm_element(&qstate->response);
	finalize_comm_element(&qstate->request);
	TRACE_OUT(on_query_destroy);
}

/*
 * The functions below are used to process write requests.
 * - on_write_request_read1 and on_write_request_read2 read the request itself
 * - on_write_request_process processes it (if the client requests caching of
 *   a negative result, on_negative_write_request_process is used instead)
 * - on_write_response_write1 sends the response
 */
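/*
 * On the wire a write request consists of three size_t values (the entry name
 * length, the cache key size and the data size), followed by the entry name,
 * the cache key and the data itself.
 */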
static int
on_write_request_read1(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 3;
	else {
		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
		write_request = get_cache_write_request(&qstate->request);

		result = qstate->read_func(qstate, &write_request->entry_length,
			sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->cache_key_size, sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->data_size, sizeof(size_t));

		if (result != sizeof(size_t) * 3) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(write_request->entry_length) ||
			BUFSIZE_INVALID(write_request->cache_key_size) ||
			(BUFSIZE_INVALID(write_request->data_size) &&
			(write_request->data_size != 0))) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		write_request->entry = calloc(1,
			write_request->entry_length + 1);
		assert(write_request->entry != NULL);

		write_request->cache_key = calloc(1,
			write_request->cache_key_size +
			qstate->eid_str_length);
		assert(write_request->cache_key != NULL);
		memcpy(write_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		if (write_request->data_size != 0) {
			write_request->data = calloc(1,
				write_request->data_size);
			assert(write_request->data != NULL);
		}

		qstate->kevent_watermark = write_request->entry_length +
			write_request->cache_key_size +
			write_request->data_size;
		qstate->process_func = on_write_request_read2;
	}

	TRACE_OUT(on_write_request_read1);
	return (0);
}

static int
on_write_request_read2(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read2);
	write_request = get_cache_write_request(&qstate->request);

	result = qstate->read_func(qstate, write_request->entry,
		write_request->entry_length);
	result += qstate->read_func(qstate, write_request->cache_key +
		qstate->eid_str_length, write_request->cache_key_size);
	if (write_request->data_size != 0)
		result += qstate->read_func(qstate, write_request->data,
			write_request->data_size);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_write_request_read2);
		return (-1);
	}
	write_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	if (write_request->data_size != 0)
		qstate->process_func = on_write_request_process;
	else
		qstate->process_func = on_negative_write_request_process;
	TRACE_OUT(on_write_request_read2);
	return (0);
}

static	int
on_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("write_request", "can't find configuration"
		    " entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			write_request->data,
			write_request->data_size);
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));

	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_write_request_process);
	return (0);
}

static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
			" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}

static int
on_write_response_write1(struct query_state *qstate)
{
	struct cache_write_response	*write_response;
	ssize_t	result;

	TRACE_IN(on_write_response_write1);
	write_response = get_cache_write_response(&qstate->response);
	result = qstate->write_func(qstate, &write_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_write_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;

	TRACE_OUT(on_write_response_write1);
	return (0);
}

/*
 * The functions below are used to process read requests.
 * - on_read_request_read1 and on_read_request_read2 read the request itself
 * - on_read_request_process processes it
 * - on_read_response_write1 and on_read_response_write2 send the response
 */
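/*
 * On the wire a read request consists of two size_t values (the entry name
 * length and the cache key size), followed by the entry name and the cache
 * key.
 */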
static int
on_read_request_read1(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 2;
	else {
		init_comm_element(&qstate->request, CET_READ_REQUEST);
		read_request = get_cache_read_request(&qstate->request);

		result = qstate->read_func(qstate,
			&read_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&read_request->cache_key_size, sizeof(size_t));

		if (result != sizeof(size_t) * 2) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(read_request->entry_length) ||
			BUFSIZE_INVALID(read_request->cache_key_size)) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		read_request->entry = calloc(1,
			read_request->entry_length + 1);
		assert(read_request->entry != NULL);

		read_request->cache_key = calloc(1,
			read_request->cache_key_size +
			qstate->eid_str_length);
		assert(read_request->cache_key != NULL);
		memcpy(read_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		qstate->kevent_watermark = read_request->entry_length +
			read_request->cache_key_size;
		qstate->process_func = on_read_request_read2;
	}

	TRACE_OUT(on_read_request_read1);
	return (0);
}

static int
on_read_request_read2(struct query_state *qstate)
{
	struct cache_read_request	*read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read2);
	read_request = get_cache_read_request(&qstate->request);

	result = qstate->read_func(qstate, read_request->entry,
		read_request->entry_length);
	result += qstate->read_func(qstate,
		read_request->cache_key + qstate->eid_str_length,
		read_request->cache_key_size);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_read_request_read2);
		return (-1);
	}
	read_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	qstate->process_func = on_read_request_process;

	TRACE_OUT(on_read_request_read2);
	return (0);
}

static int
on_read_request_process(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	struct cache_read_response *read_response;
	cache_entry	c_entry, neg_c_entry;

	struct agent	*lookup_agent;
	struct common_agent *c_agent;
	int res;

	TRACE_IN(on_read_request_process);
	init_comm_element(&qstate->response, CET_READ_RESPONSE);
	read_response = get_cache_read_response(&qstate->response);
	read_request = get_cache_read_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, read_request->entry);
	if (qstate->config_entry == NULL) {
		read_response->error_code = ENOENT;

		LOG_ERR_2("read_request",
			"can't find configuration "
			"entry '%s'. aborting request", read_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		read_response->error_code = EACCES;

		LOG_ERR_2("read_request",
			"configuration entry '%s' is disabled",
			read_request->entry);
		goto fin;
	}

	/*
	 * if we perform lookups by ourselves, then we don't need to separate
	 * cache entries by euid and egid
	 */
	if (qstate->config_entry->perform_actual_lookups != 0)
		memset(read_request->cache_key, 0, qstate->eid_str_length);
	else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			/*
			 * If the lookup is not self-performing, we check the
			 * client's euid/egid.
			 */
			read_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.cep.entry_name);
	neg_c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.cep.entry_name);
	configuration_unlock(s_configuration);
	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		read_response->error_code = cache_read(c_entry,
			read_request->cache_key,
			read_request->cache_key_size, NULL,
			&read_response->data_size);

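		/*
		 * cache_read() reported (via -2) that a buffer of data_size
		 * bytes is needed; allocate it and read the cached data for
		 * real.
		 */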
		if (read_response->error_code == -2) {
			read_response->data = malloc(
				read_response->data_size);
			assert(read_response->data != NULL);
			read_response->error_code = cache_read(c_entry,
				read_request->cache_key,
				read_request->cache_key_size,
				read_response->data,
				&read_response->data_size);
		}
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = neg_c_entry;
		if (read_response->error_code == -1) {
			read_response->error_code = cache_read(neg_c_entry,
				read_request->cache_key,
				read_request->cache_key_size, NULL,
				&read_response->data_size);

			if (read_response->error_code == -2) {
				read_response->error_code = 0;
				read_response->data = NULL;
				read_response->data_size = 0;
			}
		}
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((read_response->error_code == -1) &&
			(qstate->config_entry->perform_actual_lookups != 0)) {
			free(read_response->data);
			read_response->data = NULL;
			read_response->data_size = 0;

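			/*
			 * Neither cache had the key and the entry performs
			 * lookups by itself: ask the common agent and cache
			 * its (positive or negative) result.
			 */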
			lookup_agent = find_agent(s_agent_table,
				read_request->entry, COMMON_AGENT);

			if ((lookup_agent != NULL) &&
			(lookup_agent->type == COMMON_AGENT)) {
				c_agent = (struct common_agent *)lookup_agent;
				res = c_agent->lookup_func(
					read_request->cache_key +
						qstate->eid_str_length,
					read_request->cache_key_size -
						qstate->eid_str_length,
					&read_response->data,
					&read_response->data_size);

				if (res == NS_SUCCESS) {
					read_response->error_code = 0;
					configuration_lock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
					cache_write(c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						read_response->data,
						read_response->data_size);
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
				} else if ((res == NS_NOTFOUND) ||
					  (res == NS_RETURN)) {
					configuration_lock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);
					cache_write(neg_c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						negative_data,
						sizeof(negative_data));
					configuration_unlock_entry(
						  qstate->config_entry,
						  CELT_NEGATIVE);

					read_response->error_code = 0;
					read_response->data = NULL;
					read_response->data_size = 0;
				}
			}
		}

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		read_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	if (read_response->error_code == 0)
		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
	else
		qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_read_response_write1;

	TRACE_OUT(on_read_request_process);
	return (0);
}

static int
on_read_response_write1(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write1);
	read_response = get_cache_read_response(&qstate->response);

	result = qstate->write_func(qstate, &read_response->error_code,
		sizeof(int));

	if (read_response->error_code == 0) {
		result += qstate->write_func(qstate, &read_response->data_size,
			sizeof(size_t));
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = read_response->data_size;
		qstate->process_func = on_read_response_write2;
	} else {
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = 0;
		qstate->process_func = NULL;
	}

	TRACE_OUT(on_read_response_write1);
	return (0);
}

static int
on_read_response_write2(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write2);
	read_response = get_cache_read_response(&qstate->response);
	if (read_response->data_size > 0) {
		result = qstate->write_func(qstate, read_response->data,
			read_response->data_size);
		if (result != (ssize_t)qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write2);
			return (-1);
		}
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;
	TRACE_OUT(on_read_response_write2);
	return (0);
}

/*
 * The functions below are used to process transform requests.
 * - on_transform_request_read1 and on_transform_request_read2 read the
 *   request itself
 * - on_transform_request_process processes it
 * - on_transform_response_write1 sends the response
 */
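/*
 * On the wire a transform request consists of the entry name length (size_t)
 * and the transformation type (int), followed by the entry name, which may be
 * empty.
 */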
static int
on_transform_request_read1(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
	else {
		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
		transform_request =
			get_cache_transform_request(&qstate->request);

		result = qstate->read_func(qstate,
			&transform_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&transform_request->transformation_type, sizeof(int));

		if (result != sizeof(size_t) + sizeof(int)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if ((transform_request->transformation_type != TT_USER) &&
		    (transform_request->transformation_type != TT_ALL)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if (transform_request->entry_length != 0) {
			if (BUFSIZE_INVALID(transform_request->entry_length)) {
				TRACE_OUT(on_transform_request_read1);
				return (-1);
			}

			transform_request->entry = calloc(1,
				transform_request->entry_length + 1);
			assert(transform_request->entry != NULL);

			qstate->process_func = on_transform_request_read2;
		} else
			qstate->process_func = on_transform_request_process;

		qstate->kevent_watermark = transform_request->entry_length;
	}

	TRACE_OUT(on_transform_request_read1);
	return (0);
}

static int
on_transform_request_read2(struct query_state *qstate)
{
	struct cache_transform_request	*transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read2);
	transform_request = get_cache_transform_request(&qstate->request);

	result = qstate->read_func(qstate, transform_request->entry,
		transform_request->entry_length);

	if (result != (ssize_t)qstate->kevent_watermark) {
		TRACE_OUT(on_transform_request_read2);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_request_process;

	TRACE_OUT(on_transform_request_read2);
	return (0);
}

static int
on_transform_request_process(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	struct cache_transform_response *transform_response;
	struct configuration_entry *config_entry;
	size_t	i, size;

	TRACE_IN(on_transform_request_process);
	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
	transform_response = get_cache_transform_response(&qstate->response);
	transform_request = get_cache_transform_request(&qstate->request);

	switch (transform_request->transformation_type) {
	case TT_USER:
		if (transform_request->entry == NULL) {
			size = configuration_get_entries_size(s_configuration);
			for (i = 0; i < size; ++i) {
			    config_entry = configuration_get_entry(
				s_configuration, i);

			    if (config_entry->perform_actual_lookups == 0)
				clear_config_entry_part(config_entry,
				    qstate->eid_str, qstate->eid_str_length);
			}
		} else {
			qstate->config_entry = configuration_find_entry(
				s_configuration, transform_request->entry);

			if (qstate->config_entry == NULL) {
				LOG_ERR_2("transform_request",
					"can't find configuration"
					" entry '%s'. aborting request",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			if (qstate->config_entry->perform_actual_lookups != 0) {
				LOG_ERR_2("transform_request",
					"can't transform the cache entry %s"
					", because it is used for actual lookups",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			clear_config_entry_part(qstate->config_entry,
				qstate->eid_str, qstate->eid_str_length);
		}
		break;
	case TT_ALL:
		if (qstate->euid != 0)
			transform_response->error_code = -1;
		else {
			if (transform_request->entry == NULL) {
				size = configuration_get_entries_size(
					s_configuration);
				for (i = 0; i < size; ++i) {
				    clear_config_entry(
					configuration_get_entry(
						s_configuration, i));
				}
			} else {
				qstate->config_entry = configuration_find_entry(
					s_configuration,
					transform_request->entry);

				if (qstate->config_entry == NULL) {
					LOG_ERR_2("transform_request",
						"can't find configuration"
						" entry '%s'. aborting request",
						transform_request->entry);
					transform_response->error_code = -1;
					goto fin;
				}

				clear_config_entry(qstate->config_entry);
			}
		}
		break;
	default:
		transform_response->error_code = -1;
	}

fin:
	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_response_write1;
	TRACE_OUT(on_transform_request_process);
	return (0);
}

static int
on_transform_response_write1(struct query_state *qstate)
{
	struct cache_transform_response	*transform_response;
	ssize_t	result;

	TRACE_IN(on_transform_response_write1);
	transform_response = get_cache_transform_response(&qstate->response);
	result = qstate->write_func(qstate, &transform_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_transform_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = 0;
	qstate->process_func = NULL;
	TRACE_OUT(on_transform_response_write1);
	return (0);
}

/*
 * Checks that the client's euid and egid match its uid and gid.
 * Returns 0 on success.
 */
int
check_query_eids(struct query_state *qstate)
{

	return ((qstate->uid != qstate->euid) ||
	    (qstate->gid != qstate->egid) ? -1 : 0);
}

/*
 * Uses the qstate fields to process an "alternate" read - when the buffer is
 * too large to be received during one socket read operation
 */
ssize_t
query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	size_t remaining;
	ssize_t	result;

	TRACE_IN(query_io_buffer_read);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	assert(qstate->io_buffer_p <=
		qstate->io_buffer + qstate->io_buffer_size);
	remaining = qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p;
	if (nbytes < remaining)
		result = nbytes;
	else
		result = remaining;

	memcpy(buf, qstate->io_buffer_p, result);
	qstate->io_buffer_p += result;

	if (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size) {
		free(qstate->io_buffer);
		qstate->io_buffer = NULL;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_read);
	return (result);
}

/*
 * Uses the qstate fields to process an "alternate" write - when the buffer is
 * too large to be sent during one socket write operation
 */
ssize_t
query_io_buffer_write(struct query_state *qstate, const void *buf,
	size_t nbytes)
{
	size_t remaining;
	ssize_t	result;

	TRACE_IN(query_io_buffer_write);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	assert(qstate->io_buffer_p <=
		qstate->io_buffer + qstate->io_buffer_size);
	remaining = qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p;
	if (nbytes < remaining)
		result = nbytes;
	else
		result = remaining;

	memcpy(qstate->io_buffer_p, buf, result);
	qstate->io_buffer_p += result;

	if (qstate->io_buffer_p ==
		qstate->io_buffer + qstate->io_buffer_size) {
		qstate->use_alternate_io = 1;
		qstate->io_buffer_p = qstate->io_buffer;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_write);
	return (result);
}

/*
 * The default "read" function, which reads data directly from socket
 */
ssize_t
query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_read);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_read);
		return (-1);
	}

	result = read(qstate->sockfd, buf, nbytes);
	if (result < 0 || (size_t)result < nbytes)
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_read);
	return (result);
}

/*
 * The default "write" function, which writes data directly to socket
 */
ssize_t
query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_write);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_write);
		return (-1);
	}

	result = write(qstate->sockfd, buf, nbytes);
	if (result < 0 || (size_t)result < nbytes)
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_write);
	return (result);
}

/*
 * Initializes the query_state structure by filling it with the default values.
 */
struct query_state *
init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
{
	struct query_state	*retval;

	TRACE_IN(init_query_state);
	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->sockfd = sockfd;
	retval->kevent_filter = EVFILT_READ;
	retval->kevent_watermark = kevent_watermark;

	retval->euid = euid;
	retval->egid = egid;
	retval->uid = retval->gid = -1;
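	/*
	 * eid_str ("<euid>_<egid>_") is prepended to cache keys, so that
	 * per-user cache entries of different clients do not collide.
	 */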
	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
		retval->egid) == -1) {
		free(retval);
		return (NULL);
	}
	retval->eid_str_length = strlen(retval->eid_str);

	init_comm_element(&retval->request, CET_UNDEFINED);
	init_comm_element(&retval->response, CET_UNDEFINED);
	retval->process_func = on_query_startup;
	retval->destroy_func = on_query_destroy;

	retval->write_func = query_socket_write;
	retval->read_func = query_socket_read;

	get_time_func(&retval->creation_time);
	memcpy(&retval->timeout, &s_configuration->query_timeout,
		sizeof(struct timeval));

	TRACE_OUT(init_query_state);
	return (retval);
}

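/*
 * Frees the query_state structure along with the communication elements and
 * buffers associated with it.
 */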
void
destroy_query_state(struct query_state *qstate)
{

	TRACE_IN(destroy_query_state);
	if (qstate->eid_str != NULL)
		free(qstate->eid_str);

	if (qstate->io_buffer != NULL)
		free(qstate->io_buffer);

	qstate->destroy_func(qstate);
	free(qstate);
	TRACE_OUT(destroy_query_state);
}