summaryrefslogtreecommitdiffstats
path: root/usr.sbin/nscd
diff options
context:
space:
mode:
authorume <ume@FreeBSD.org>2006-04-28 12:03:38 +0000
committerume <ume@FreeBSD.org>2006-04-28 12:03:38 +0000
commite14f1c3b3b31e7f6c28cef5acc87f4c7373d288c (patch)
treed796503361cc28eb3b9eaa593876abd826a2cf81 /usr.sbin/nscd
parent4b38e5bbca25f143cbf615b8c7fe1f7873ba1e6c (diff)
downloadFreeBSD-src-e14f1c3b3b31e7f6c28cef5acc87f4c7373d288c.zip
FreeBSD-src-e14f1c3b3b31e7f6c28cef5acc87f4c7373d288c.tar.gz
- Extend the nsswitch to support Services, Protocols and Rpc
databases.
- Make nsswitch support caching.

Submitted by:	Michael Bushkov <bushman__at__rsu.ru>
Sponsored by:	Google Summer of Code 2005
Diffstat (limited to 'usr.sbin/nscd')
-rw-r--r--usr.sbin/nscd/Makefile19
-rw-r--r--usr.sbin/nscd/agent.c127
-rw-r--r--usr.sbin/nscd/agent.h72
-rw-r--r--usr.sbin/nscd/agents/Makefile.inc3
-rw-r--r--usr.sbin/nscd/agents/group.c262
-rw-r--r--usr.sbin/nscd/agents/group.h32
-rw-r--r--usr.sbin/nscd/agents/passwd.c269
-rw-r--r--usr.sbin/nscd/agents/passwd.h32
-rw-r--r--usr.sbin/nscd/agents/services.c286
-rw-r--r--usr.sbin/nscd/agents/services.h32
-rw-r--r--usr.sbin/nscd/cachelib.c1234
-rw-r--r--usr.sbin/nscd/cachelib.h281
-rw-r--r--usr.sbin/nscd/cacheplcs.c590
-rw-r--r--usr.sbin/nscd/cacheplcs.h137
-rw-r--r--usr.sbin/nscd/config.c588
-rw-r--r--usr.sbin/nscd/config.h156
-rw-r--r--usr.sbin/nscd/debug.c149
-rw-r--r--usr.sbin/nscd/debug.h67
-rw-r--r--usr.sbin/nscd/hashtable.h218
-rw-r--r--usr.sbin/nscd/log.c78
-rw-r--r--usr.sbin/nscd/log.h43
-rw-r--r--usr.sbin/nscd/mp_rs_query.34
-rw-r--r--usr.sbin/nscd/mp_rs_query.c537
-rw-r--r--usr.sbin/nscd/mp_rs_query.h34
-rw-r--r--usr.sbin/nscd/mp_ws_query.c548
-rw-r--r--usr.sbin/nscd/mp_ws_query.h36
-rw-r--r--usr.sbin/nscd/nscd.8148
-rw-r--r--usr.sbin/nscd/nscd.c884
-rw-r--r--usr.sbin/nscd/nscd.conf.5102
-rw-r--r--usr.sbin/nscd/nscdcli.c284
-rw-r--r--usr.sbin/nscd/nscdcli.h57
-rw-r--r--usr.sbin/nscd/parser.c474
-rw-r--r--usr.sbin/nscd/parser.h35
-rw-r--r--usr.sbin/nscd/protocol.c550
-rw-r--r--usr.sbin/nscd/protocol.h265
-rw-r--r--usr.sbin/nscd/query.c1278
-rw-r--r--usr.sbin/nscd/query.h110
-rw-r--r--usr.sbin/nscd/singletons.c36
-rw-r--r--usr.sbin/nscd/singletons.h47
39 files changed, 10134 insertions, 0 deletions
diff --git a/usr.sbin/nscd/Makefile b/usr.sbin/nscd/Makefile
new file mode 100644
index 0000000..5478341
--- /dev/null
+++ b/usr.sbin/nscd/Makefile
@@ -0,0 +1,19 @@
+# $FreeBSD$
+
+PROG=cached
+PROGNAME=cached
+MAN=cached.conf.5 cached.8
+
+WARNS?=2
+SRCS= agent.c cached.c cachedcli.c cachelib.c cacheplcs.c debug.c log.c \
+ config.c query.c mp_ws_query.c mp_rs_query.c singletons.c protocol.c \
+ parser.c
+CFLAGS+= -DCONFIG_PATH="\"${PREFIX}/etc/cached.conf\""
+DPADD+=${LIBM} ${LIBPTHREAD} ${LIBUTIL}
+LDADD+=${LIBM} ${LIBPTHREAD} ${LIBUTIL}
+LDFLAGS+= -Xlinker --export-dynamic
+
+.PATH: ${.CURDIR}/agents
+.include "agents/Makefile.inc"
+.include "../Makefile.inc"
+.include <bsd.prog.mk>
diff --git a/usr.sbin/nscd/agent.c b/usr.sbin/nscd/agent.c
new file mode 100644
index 0000000..2d58ef1
--- /dev/null
+++ b/usr.sbin/nscd/agent.c
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include "agent.h"
+#include "debug.h"
+
+static int
+agent_cmp_func(const void *a1, const void *a2)
+{
+ struct agent const *ap1 = *((struct agent const **)a1);
+ struct agent const *ap2 = *((struct agent const **)a2);
+ int res;
+
+ res = strcmp(ap1->name, ap2->name);
+ if (res == 0) {
+ if (ap1->type == ap2->type)
+ res = 0;
+ else if (ap1->type < ap2->type)
+ res = -1;
+ else
+ res = 1;
+ }
+
+ return (res);
+}
+
+struct agent_table *
+init_agent_table()
+{
+ struct agent_table *retval;
+
+ TRACE_IN(init_agent_table);
+ retval = (struct agent_table *)malloc(sizeof(struct agent_table));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct agent_table));
+
+ TRACE_OUT(init_agent_table);
+ return (retval);
+}
+
+void
+register_agent(struct agent_table *at, struct agent *a)
+{
+ struct agent **new_agents;
+ size_t new_agents_num;
+
+ TRACE_IN(register_agent);
+ assert(at != NULL);
+ assert(a != NULL);
+ new_agents_num = at->agents_num + 1;
+ new_agents = (struct agent **)malloc(sizeof(struct agent *) *
+ new_agents_num);
+ assert(new_agents != NULL);
+ memcpy(new_agents, at->agents, at->agents_num * sizeof(struct agent *));
+ new_agents[new_agents_num - 1] = a;
+ qsort(new_agents, new_agents_num, sizeof(struct agent *),
+ agent_cmp_func);
+
+ free(at->agents);
+ at->agents = new_agents;
+ at->agents_num = new_agents_num;
+ TRACE_OUT(register_agent);
+}
+
+struct agent *
+find_agent(struct agent_table *at, const char *name, enum agent_type type)
+{
+ struct agent **res;
+ struct agent model, *model_p;
+
+ TRACE_IN(find_agent);
+ model.name = (char *)name;
+ model.type = type;
+ model_p = &model;
+ res = bsearch(&model_p, at->agents, at->agents_num,
+ sizeof(struct agent *), agent_cmp_func);
+
+ TRACE_OUT(find_agent);
+ return ( res == NULL ? NULL : *res);
+}
+
+void
+destroy_agent_table(struct agent_table *at)
+{
+ size_t i;
+
+ TRACE_IN(destroy_agent_table);
+ assert(at != NULL);
+ for (i = 0; i < at->agents_num; ++i) {
+ free(at->agents[i]->name);
+ free(at->agents[i]);
+ }
+
+ free(at->agents);
+ free(at);
+ TRACE_OUT(destroy_agent_table);
+}
diff --git a/usr.sbin/nscd/agent.h b/usr.sbin/nscd/agent.h
new file mode 100644
index 0000000..5dc368d
--- /dev/null
+++ b/usr.sbin/nscd/agent.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_AGENT_H__
+#define __CACHED_AGENT_H__
+
+/*
+ * Agents are used to perform the actual lookups from the caching daemon.
+ * There are two types of daemons: for common requests and for multipart
+ * requests.
+ * All agents are stored in the agents table, which is the singleton.
+ */
+
+enum agent_type {
+ COMMON_AGENT = 0,
+ MULTIPART_AGENT = 1
+};
+
+struct agent {
+ char *name;
+ enum agent_type type;
+};
+
+struct common_agent {
+ struct agent parent;
+ int (*lookup_func)(const char *, size_t, char **, size_t *);
+};
+
+struct multipart_agent {
+ struct agent parent;
+ void *(*mp_init_func)();
+ int (*mp_lookup_func)(char **, size_t *, void *);
+ void (*mp_destroy_func)(void *);
+};
+
+struct agent_table {
+ struct agent **agents;
+ size_t agents_num;
+};
+
+extern struct agent_table *init_agent_table();
+extern void register_agent(struct agent_table *, struct agent *);
+extern struct agent *find_agent(struct agent_table *, const char *,
+ enum agent_type);
+extern void destroy_agent_table(struct agent_table *);
+
+#endif
diff --git a/usr.sbin/nscd/agents/Makefile.inc b/usr.sbin/nscd/agents/Makefile.inc
new file mode 100644
index 0000000..1be32e1
--- /dev/null
+++ b/usr.sbin/nscd/agents/Makefile.inc
@@ -0,0 +1,3 @@
+# $FreeBSD$
+
+SRCS += passwd.c group.c services.c
diff --git a/usr.sbin/nscd/agents/group.c b/usr.sbin/nscd/agents/group.c
new file mode 100644
index 0000000..b9190be
--- /dev/null
+++ b/usr.sbin/nscd/agents/group.c
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <nsswitch.h>
+#include <grp.h>
+#include <string.h>
+#include <stdlib.h>
+#include "../debug.h"
+#include "group.h"
+
+static int group_marshal_func(struct group *, char *, size_t *);
+static int group_lookup_func(const char *, size_t, char **, size_t *);
+static void *group_mp_init_func();
+static int group_mp_lookup_func(char **, size_t *, void *);
+static void group_mp_destroy_func(void *);
+
+static int
+group_marshal_func(struct group *grp, char *buffer, size_t *buffer_size)
+{
+ struct group new_grp;
+ size_t desired_size, size, mem_size;
+ char *p, **mem;
+
+ TRACE_IN(group_marshal_func);
+ desired_size = ALIGNBYTES + sizeof(struct group) + sizeof(char *);
+
+ if (grp->gr_name != NULL)
+ desired_size += strlen(grp->gr_name) + 1;
+ if (grp->gr_passwd != NULL)
+ desired_size += strlen(grp->gr_passwd) + 1;
+
+ if (grp->gr_mem != NULL) {
+ mem_size = 0;
+ for (mem = grp->gr_mem; *mem; ++mem) {
+ desired_size += strlen(*mem) + 1;
+ ++mem_size;
+ }
+
+ desired_size += ALIGNBYTES + (mem_size + 1) * sizeof(char *);
+ }
+
+ if ((desired_size > *buffer_size) || (buffer == NULL)) {
+ *buffer_size = desired_size;
+ TRACE_OUT(group_marshal_func);
+ return (NS_RETURN);
+ }
+
+ memcpy(&new_grp, grp, sizeof(struct group));
+ memset(buffer, 0, desired_size);
+
+ *buffer_size = desired_size;
+ p = buffer + sizeof(struct group) + sizeof(char *);
+ memcpy(buffer + sizeof(struct group), &p, sizeof(char *));
+ p = (char *)ALIGN(p);
+
+ if (new_grp.gr_name != NULL) {
+ size = strlen(new_grp.gr_name);
+ memcpy(p, new_grp.gr_name, size);
+ new_grp.gr_name = p;
+ p += size + 1;
+ }
+
+ if (new_grp.gr_passwd != NULL) {
+ size = strlen(new_grp.gr_passwd);
+ memcpy(p, new_grp.gr_passwd, size);
+ new_grp.gr_passwd = p;
+ p += size + 1;
+ }
+
+ if (new_grp.gr_mem != NULL) {
+ p = (char *)ALIGN(p);
+ memcpy(p, new_grp.gr_mem, sizeof(char *) * mem_size);
+ new_grp.gr_mem = (char **)p;
+ p += sizeof(char *) * (mem_size + 1);
+
+ for (mem = new_grp.gr_mem; *mem; ++mem) {
+ size = strlen(*mem);
+ memcpy(p, *mem, size);
+ *mem = p;
+ p += size + 1;
+ }
+ }
+
+ memcpy(buffer, &new_grp, sizeof(struct group));
+ TRACE_OUT(group_marshal_func);
+ return (NS_SUCCESS);
+}
+
+static int
+group_lookup_func(const char *key, size_t key_size, char **buffer,
+ size_t *buffer_size)
+{
+ enum nss_lookup_type lookup_type;
+ char *name;
+ size_t size;
+ gid_t gid;
+
+ struct group *result;
+
+ TRACE_IN(group_lookup_func);
+ assert(buffer != NULL);
+ assert(buffer_size != NULL);
+
+ if (key_size < sizeof(enum nss_lookup_type)) {
+ TRACE_OUT(group_lookup_func);
+ return (NS_UNAVAIL);
+ }
+ memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ size = key_size - sizeof(enum nss_lookup_type) + 1;
+ name = (char *)malloc(size);
+ assert(name != NULL);
+ memset(name, 0, size);
+ memcpy(name, key + sizeof(enum nss_lookup_type), size - 1);
+ break;
+ case nss_lt_id:
+ if (key_size < sizeof(enum nss_lookup_type) +
+ sizeof(gid_t)) {
+			TRACE_OUT(group_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ memcpy(&gid, key + sizeof(enum nss_lookup_type), sizeof(gid_t));
+ break;
+ default:
+ TRACE_OUT(group_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ TRACE_STR(name);
+ result = getgrnam(name);
+ free(name);
+ break;
+ case nss_lt_id:
+ result = getgrgid(gid);
+ break;
+ default:
+ /* SHOULD NOT BE REACHED */
+ break;
+ }
+
+ if (result != NULL) {
+ group_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ group_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(group_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
+static void *
+group_mp_init_func()
+{
+ TRACE_IN(group_mp_init_func);
+ setgrent();
+ TRACE_OUT(group_mp_init_func);
+
+ return (NULL);
+}
+
+static int
+group_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
+{
+ struct group *result;
+
+ TRACE_IN(group_mp_lookup_func);
+ result = getgrent();
+ if (result != NULL) {
+ group_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ group_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(group_mp_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
+static void
+group_mp_destroy_func(void *mdata)
+{
+ TRACE_IN(group_mp_destroy_func);
+ TRACE_OUT(group_mp_destroy_func);
+}
+
+struct agent *
+init_group_agent()
+{
+ struct common_agent *retval;
+
+ TRACE_IN(init_group_agent);
+ retval = (struct common_agent *)malloc(sizeof(struct common_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct common_agent));
+
+ retval->parent.name = strdup("group");
+ assert(retval->parent.name != NULL);
+
+ retval->parent.type = COMMON_AGENT;
+ retval->lookup_func = group_lookup_func;
+
+ TRACE_OUT(init_group_agent);
+ return ((struct agent *)retval);
+}
+
+struct agent *
+init_group_mp_agent()
+{
+ struct multipart_agent *retval;
+
+ TRACE_IN(init_group_mp_agent);
+ retval = (struct multipart_agent *)malloc(
+ sizeof(struct multipart_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct multipart_agent));
+
+ retval->parent.name = strdup("group");
+ retval->parent.type = MULTIPART_AGENT;
+ retval->mp_init_func = group_mp_init_func;
+ retval->mp_lookup_func = group_mp_lookup_func;
+ retval->mp_destroy_func = group_mp_destroy_func;
+ assert(retval->parent.name != NULL);
+
+ TRACE_OUT(init_group_mp_agent);
+ return ((struct agent *)retval);
+}
diff --git a/usr.sbin/nscd/agents/group.h b/usr.sbin/nscd/agents/group.h
new file mode 100644
index 0000000..e6c7397
--- /dev/null
+++ b/usr.sbin/nscd/agents/group.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "../agent.h"
+
+extern struct agent *init_group_agent();
+extern struct agent *init_group_mp_agent();
diff --git a/usr.sbin/nscd/agents/passwd.c b/usr.sbin/nscd/agents/passwd.c
new file mode 100644
index 0000000..50c55ba
--- /dev/null
+++ b/usr.sbin/nscd/agents/passwd.c
@@ -0,0 +1,269 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <assert.h>
+#include <nsswitch.h>
+#include <pwd.h>
+#include <string.h>
+#include <stdlib.h>
+#include "../debug.h"
+#include "passwd.h"
+
+static int passwd_marshal_func(struct passwd *, char *, size_t *);
+static int passwd_lookup_func(const char *, size_t, char **, size_t *);
+static void *passwd_mp_init_func();
+static int passwd_mp_lookup_func(char **, size_t *, void *);
+static void passwd_mp_destroy_func(void *mdata);
+
+static int
+passwd_marshal_func(struct passwd *pwd, char *buffer, size_t *buffer_size)
+{
+ char *p;
+ struct passwd new_pwd;
+ size_t desired_size, size;
+
+ TRACE_IN(passwd_marshal_func);
+ desired_size = sizeof(struct passwd) + sizeof(char *) +
+ strlen(pwd->pw_name) + 1;
+ if (pwd->pw_passwd != NULL)
+ desired_size += strlen(pwd->pw_passwd) + 1;
+ if (pwd->pw_class != NULL)
+ desired_size += strlen(pwd->pw_class) + 1;
+ if (pwd->pw_gecos != NULL)
+ desired_size += strlen(pwd->pw_gecos) + 1;
+ if (pwd->pw_dir != NULL)
+ desired_size += strlen(pwd->pw_dir) + 1;
+ if (pwd->pw_shell != NULL)
+ desired_size += strlen(pwd->pw_shell) + 1;
+
+ if ((*buffer_size < desired_size) || (buffer == NULL)) {
+ *buffer_size = desired_size;
+ TRACE_OUT(passwd_marshal_func);
+ return (NS_RETURN);
+ }
+
+ memcpy(&new_pwd, pwd, sizeof(struct passwd));
+ memset(buffer, 0, desired_size);
+
+ *buffer_size = desired_size;
+ p = buffer + sizeof(struct passwd) + sizeof(char *);
+ memcpy(buffer + sizeof(struct passwd), &p, sizeof(char *));
+
+ if (new_pwd.pw_name != NULL) {
+ size = strlen(new_pwd.pw_name);
+ memcpy(p, new_pwd.pw_name, size);
+ new_pwd.pw_name = p;
+ p += size + 1;
+ }
+
+ if (new_pwd.pw_passwd != NULL) {
+ size = strlen(new_pwd.pw_passwd);
+ memcpy(p, new_pwd.pw_passwd, size);
+ new_pwd.pw_passwd = p;
+ p += size + 1;
+ }
+
+ if (new_pwd.pw_class != NULL) {
+ size = strlen(new_pwd.pw_class);
+ memcpy(p, new_pwd.pw_class, size);
+ new_pwd.pw_class = p;
+ p += size + 1;
+ }
+
+ if (new_pwd.pw_gecos != NULL) {
+ size = strlen(new_pwd.pw_gecos);
+ memcpy(p, new_pwd.pw_gecos, size);
+ new_pwd.pw_gecos = p;
+ p += size + 1;
+ }
+
+ if (new_pwd.pw_dir != NULL) {
+ size = strlen(new_pwd.pw_dir);
+ memcpy(p, new_pwd.pw_dir, size);
+ new_pwd.pw_dir = p;
+ p += size + 1;
+ }
+
+ if (new_pwd.pw_shell != NULL) {
+ size = strlen(new_pwd.pw_shell);
+ memcpy(p, new_pwd.pw_shell, size);
+ new_pwd.pw_shell = p;
+ p += size + 1;
+ }
+
+ memcpy(buffer, &new_pwd, sizeof(struct passwd));
+ TRACE_OUT(passwd_marshal_func);
+ return (NS_SUCCESS);
+}
+
+static int
+passwd_lookup_func(const char *key, size_t key_size, char **buffer,
+ size_t *buffer_size)
+{
+ enum nss_lookup_type lookup_type;
+ char *login;
+ size_t size;
+ uid_t uid;
+
+ struct passwd *result;
+
+ TRACE_IN(passwd_lookup_func);
+ assert(buffer != NULL);
+ assert(buffer_size != NULL);
+
+ if (key_size < sizeof(enum nss_lookup_type)) {
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+ memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ size = key_size - sizeof(enum nss_lookup_type) + 1;
+ login = (char *)malloc(size);
+ assert(login != NULL);
+ memset(login, 0, size);
+ memcpy(login, key + sizeof(enum nss_lookup_type), size - 1);
+ break;
+ case nss_lt_id:
+ if (key_size < sizeof(enum nss_lookup_type) +
+ sizeof(uid_t)) {
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ memcpy(&uid, key + sizeof(enum nss_lookup_type), sizeof(uid_t));
+ break;
+ default:
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ result = getpwnam(login);
+ free(login);
+ break;
+ case nss_lt_id:
+ result = getpwuid(uid);
+ break;
+ default:
+ /* SHOULD NOT BE REACHED */
+ break;
+ }
+
+ if (result != NULL) {
+ passwd_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ passwd_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(passwd_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
+static void *
+passwd_mp_init_func()
+{
+ TRACE_IN(passwd_mp_init_func);
+ setpwent();
+ TRACE_OUT(passwd_mp_init_func);
+
+ return (NULL);
+}
+
+static int
+passwd_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
+{
+ struct passwd *result;
+
+ TRACE_IN(passwd_mp_lookup_func);
+ result = getpwent();
+ if (result != NULL) {
+ passwd_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ passwd_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(passwd_mp_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
+static void
+passwd_mp_destroy_func(void *mdata)
+{
+ TRACE_IN(passwd_mp_destroy_func);
+ TRACE_OUT(passwd_mp_destroy_func);
+}
+
+struct agent *
+init_passwd_agent()
+{
+ struct common_agent *retval;
+
+ TRACE_IN(init_passwd_agent);
+ retval = (struct common_agent *)malloc(sizeof(struct common_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct common_agent));
+
+ retval->parent.name = strdup("passwd");
+ assert(retval->parent.name != NULL);
+
+ retval->parent.type = COMMON_AGENT;
+ retval->lookup_func = passwd_lookup_func;
+
+ TRACE_OUT(init_passwd_agent);
+ return ((struct agent *)retval);
+}
+
+struct agent *
+init_passwd_mp_agent()
+{
+ struct multipart_agent *retval;
+
+ TRACE_IN(init_passwd_mp_agent);
+ retval = (struct multipart_agent *)malloc(
+ sizeof(struct multipart_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct multipart_agent));
+
+ retval->parent.name = strdup("passwd");
+ retval->parent.type = MULTIPART_AGENT;
+ retval->mp_init_func = passwd_mp_init_func;
+ retval->mp_lookup_func = passwd_mp_lookup_func;
+ retval->mp_destroy_func = passwd_mp_destroy_func;
+ assert(retval->parent.name != NULL);
+
+ TRACE_OUT(init_passwd_mp_agent);
+ return ((struct agent *)retval);
+}
diff --git a/usr.sbin/nscd/agents/passwd.h b/usr.sbin/nscd/agents/passwd.h
new file mode 100644
index 0000000..956a50d
--- /dev/null
+++ b/usr.sbin/nscd/agents/passwd.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "../agent.h"
+
+extern struct agent *init_passwd_agent();
+extern struct agent *init_passwd_mp_agent();
diff --git a/usr.sbin/nscd/agents/services.c b/usr.sbin/nscd/agents/services.c
new file mode 100644
index 0000000..3683396
--- /dev/null
+++ b/usr.sbin/nscd/agents/services.c
@@ -0,0 +1,286 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <nsswitch.h>
+#include <netdb.h>
+#include <string.h>
+#include <stdlib.h>
+#include "../debug.h"
+#include "services.h"
+
+static int services_marshal_func(struct servent *, char *, size_t *);
+static int services_lookup_func(const char *, size_t, char **, size_t *);
+static void *services_mp_init_func();
+static int services_mp_lookup_func(char **, size_t *, void *);
+static void services_mp_destroy_func(void *);
+
/*
 * Marshals a servent structure into a flat buffer so it can be stored in
 * the cache.  Two-pass protocol: when buffer is NULL or *buffer_size is
 * too small, only the required size is written to *buffer_size and
 * NS_RETURN is returned; on the second call the buffer is filled and
 * NS_SUCCESS is returned.
 */
static int
services_marshal_func(struct servent *serv, char *buffer, size_t *buffer_size)
{
	struct servent new_serv;
	size_t desired_size;
	char **alias;
	char *p;
	size_t size;
	size_t aliases_size;

	TRACE_IN(services_marshal_func);
	/* the struct itself, one stored pointer to the string area, and
	 * alignment slack */
	desired_size = ALIGNBYTES + sizeof(struct servent) + sizeof(char *);
	if (serv->s_name != NULL)
		desired_size += strlen(serv->s_name) + 1;
	if (serv->s_proto != NULL)
		desired_size += strlen(serv->s_proto) + 1;

	aliases_size = 0;
	if (serv->s_aliases != NULL) {
		/* each alias string plus a NULL-terminated pointer array */
		for (alias = serv->s_aliases; *alias; ++alias) {
			desired_size += strlen(*alias) + 1;
			++aliases_size;
		}

		desired_size += ALIGNBYTES + sizeof(char *) *
			(aliases_size + 1);
	}

	if ((*buffer_size < desired_size) || (buffer == NULL)) {
		/* sizing pass: report the needed room and bail out */
		*buffer_size = desired_size;
		TRACE_OUT(services_marshal_func);
		return (NS_RETURN);
	}

	memcpy(&new_serv, serv, sizeof(struct servent));
	memset(buffer, 0, desired_size);

	*buffer_size = desired_size;
	/* record where the string area begins, right after the struct */
	p = buffer + sizeof(struct servent) + sizeof(char *);
	memcpy(buffer + sizeof(struct servent), &p, sizeof(char *));
	p = (char *)ALIGN(p);

	if (new_serv.s_name != NULL) {
		size = strlen(new_serv.s_name);
		memcpy(p, new_serv.s_name, size);
		new_serv.s_name = p;
		p += size + 1;
	}

	if (new_serv.s_proto != NULL) {
		size = strlen(new_serv.s_proto);
		memcpy(p, new_serv.s_proto, size);
		new_serv.s_proto = p;
		p += size + 1;
	}

	if (new_serv.s_aliases != NULL) {
		/* copy the alias pointer array (its terminating NULL slot
		 * is already zero from the memset above), then each alias
		 * string, rewriting the pointers to the in-buffer copies */
		p = (char *)ALIGN(p);
		memcpy(p, new_serv.s_aliases, sizeof(char *) * aliases_size);
		new_serv.s_aliases = (char **)p;
		p += sizeof(char *) * (aliases_size + 1);

		for (alias = new_serv.s_aliases; *alias; ++alias) {
			size = strlen(*alias);
			memcpy(p, *alias, size);
			*alias = p;
			p += size + 1;
		}
	}

	/* finally store the patched struct at the front of the buffer */
	memcpy(buffer, &new_serv, sizeof(struct servent));
	TRACE_OUT(services_marshal_func);
	return (NS_SUCCESS);
}
+
+static int
+services_lookup_func(const char *key, size_t key_size, char **buffer,
+ size_t *buffer_size)
+{
+ enum nss_lookup_type lookup_type;
+ char *name = NULL;
+ char *proto = NULL;
+ size_t size, size2;
+ int port;
+
+ struct servent *result;
+
+ TRACE_IN(services_lookup_func);
+
+ assert(buffer != NULL);
+ assert(buffer_size != NULL);
+
+ if (key_size < sizeof(enum nss_lookup_type)) {
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+ memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ size = key_size - sizeof(enum nss_lookup_type) + 1;
+ name = (char *)malloc(size);
+ assert(name != NULL);
+ memset(name, 0, size);
+ memcpy(name, key + sizeof(enum nss_lookup_type), size - 1);
+
+ size2 = strlen(name) + 1;
+ if (size2 < size) {
+ proto = strchr(name, '\0');
+ if (strrchr(name, '\0') > proto)
+ ++proto ;
+ else
+ proto = NULL;
+ }
+ break;
+ case nss_lt_id:
+ if (key_size < sizeof(enum nss_lookup_type) +
+ sizeof(int)) {
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ memcpy(&port, key + sizeof(enum nss_lookup_type),
+ sizeof(int));
+
+ size = key_size - sizeof(enum nss_lookup_type) + sizeof(int);
+ if (size > 0) {
+ proto = (char *)malloc(size + 1);
+ assert(proto != NULL);
+ memset(proto, size + 1, 0);
+ memcpy(proto, key + sizeof(enum nss_lookup_type) +
+ sizeof(int), size);
+ }
+ break;
+ default:
+ TRACE_OUT(passwd_lookup_func);
+ return (NS_UNAVAIL);
+ }
+
+ switch (lookup_type) {
+ case nss_lt_name:
+ result = getservbyname(name, proto);
+ free(name);
+ break;
+ case nss_lt_id:
+ result = getservbyport(port, proto);
+ free(proto);
+ break;
+ default:
+ /* SHOULD NOT BE REACHED */
+ break;
+ }
+
+ if (result != NULL) {
+ services_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ services_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(services_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
+static void *
+services_mp_init_func()
+{
+ TRACE_IN(services_mp_init_func);
+ setservent(0);
+ TRACE_OUT(services_mp_init_func);
+
+ return (NULL);
+}
+
+static int
+services_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
+{
+ struct servent *result;
+
+ TRACE_IN(services_mp_lookup_func);
+ result = getservent();
+ if (result != NULL) {
+ services_marshal_func(result, NULL, buffer_size);
+ *buffer = (char *)malloc(*buffer_size);
+ assert(*buffer != NULL);
+ services_marshal_func(result, *buffer, buffer_size);
+ }
+
+ TRACE_OUT(services_mp_lookup_func);
+ return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
+}
+
/*
 * Tears down a multipart session.  services_mp_init_func() allocates
 * nothing, so only the trace hooks fire here.
 */
static void
services_mp_destroy_func(void *mdata)
{

	TRACE_IN(services_mp_destroy_func);
	TRACE_OUT(services_mp_destroy_func);
}
+
+struct agent *
+init_services_agent()
+{
+ struct common_agent *retval;
+ TRACE_IN(init_services_agent);
+
+ retval = (struct common_agent *)malloc(sizeof(struct common_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct common_agent));
+
+ retval->parent.name = strdup("services");
+ assert(retval->parent.name != NULL);
+
+ retval->parent.type = COMMON_AGENT;
+ retval->lookup_func = services_lookup_func;
+
+ TRACE_OUT(init_services_agent);
+ return ((struct agent *)retval);
+}
+
+struct agent *
+init_services_mp_agent()
+{
+ struct multipart_agent *retval;
+
+ TRACE_IN(init_services_mp_agent);
+ retval = (struct multipart_agent *)malloc(
+ sizeof(struct multipart_agent));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct multipart_agent));
+
+ retval->parent.name = strdup("services");
+ retval->parent.type = MULTIPART_AGENT;
+ retval->mp_init_func = services_mp_init_func;
+ retval->mp_lookup_func = services_mp_lookup_func;
+ retval->mp_destroy_func = services_mp_destroy_func;
+ assert(retval->parent.name != NULL);
+
+ TRACE_OUT(init_services_mp_agent);
+ return ((struct agent *)retval);
+}
diff --git a/usr.sbin/nscd/agents/services.h b/usr.sbin/nscd/agents/services.h
new file mode 100644
index 0000000..0b77c87
--- /dev/null
+++ b/usr.sbin/nscd/agents/services.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "../agent.h"
+
+extern struct agent *init_services_agent();
+extern struct agent *init_services_mp_agent();
diff --git a/usr.sbin/nscd/cachelib.c b/usr.sbin/nscd/cachelib.c
new file mode 100644
index 0000000..4f771cc
--- /dev/null
+++ b/usr.sbin/nscd/cachelib.c
@@ -0,0 +1,1234 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/time.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "cachelib.h"
+#include "debug.h"
+
+#define INITIAL_ENTRIES_CAPACITY 32
+#define ENTRIES_CAPACITY_STEP 32
+
+#define STRING_SIMPLE_HASH_BODY(in_var, var, a, M) \
+ for ((var) = 0; *(in_var) != '\0'; ++(in_var)) \
+ (var) = ((a)*(var) + *(in_var)) % (M)
+
+#define STRING_SIMPLE_MP2_HASH_BODY(in_var, var, a, M) \
+ for ((var) = 0; *(in_var) != 0; ++(in_var)) \
+ (var) = ((a)*(var) + *(in_var)) & (M - 1)
+
+static int cache_elemsize_common_continue_func(struct cache_common_entry_ *,
+ struct cache_policy_item_ *);
+static int cache_lifetime_common_continue_func(struct cache_common_entry_ *,
+ struct cache_policy_item_ *);
+static void clear_cache_entry(struct cache_entry_ *);
+static void destroy_cache_entry(struct cache_entry_ *);
+static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *);
+static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *);
+static int entries_bsearch_cmp_func(const void *, const void *);
+static int entries_qsort_cmp_func(const void *, const void *);
+static struct cache_entry_ ** find_cache_entry_p(struct cache_ *,
+ const char *);
+static void flush_cache_entry(struct cache_entry_ *);
+static void flush_cache_policy(struct cache_common_entry_ *,
+ struct cache_policy_ *, struct cache_policy_ *,
+ int (*)(struct cache_common_entry_ *,
+ struct cache_policy_item_ *));
+static int ht_items_cmp_func(const void *, const void *);
+static int ht_items_fixed_size_left_cmp_func(const void *, const void *);
+static hashtable_index_t ht_item_hash_func(const void *, size_t);
+
+/*
+ * Hashing and comparing routines, that are used with the hash tables
+ */
+static int
+ht_items_cmp_func(const void *p1, const void *p2)
+{
+ struct cache_ht_item_data_ *hp1, *hp2;
+ size_t min_size;
+ int result;
+
+ hp1 = (struct cache_ht_item_data_ *)p1;
+ hp2 = (struct cache_ht_item_data_ *)p2;
+
+ assert(hp1->key != NULL);
+ assert(hp2->key != NULL);
+
+ if (hp1->key_size != hp2->key_size) {
+ min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
+ hp2->key_size;
+ result = memcmp(hp1->key, hp2->key, min_size);
+
+ if (result == 0)
+ return ((hp1->key_size < hp2->key_size) ? -1 : 1);
+ else
+ return (result);
+ } else
+ return (memcmp(hp1->key, hp2->key, hp1->key_size));
+}
+
+static int
+ht_items_fixed_size_left_cmp_func(const void *p1, const void *p2)
+{
+ struct cache_ht_item_data_ *hp1, *hp2;
+ size_t min_size;
+ int result;
+
+ hp1 = (struct cache_ht_item_data_ *)p1;
+ hp2 = (struct cache_ht_item_data_ *)p2;
+
+ assert(hp1->key != NULL);
+ assert(hp2->key != NULL);
+
+ if (hp1->key_size != hp2->key_size) {
+ min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size :
+ hp2->key_size;
+ result = memcmp(hp1->key, hp2->key, min_size);
+
+ if (result == 0)
+ if (min_size == hp1->key_size)
+ return (0);
+ else
+ return ((hp1->key_size < hp2->key_size) ? -1 : 1);
+ else
+ return (result);
+ } else
+ return (memcmp(hp1->key, hp2->key, hp1->key_size));
+}
+
+static hashtable_index_t
+ht_item_hash_func(const void *p, size_t cache_entries_size)
+{
+ struct cache_ht_item_data_ *hp;
+ size_t i;
+
+ hashtable_index_t retval;
+
+ hp = (struct cache_ht_item_data_ *)p;
+ assert(hp->key != NULL);
+
+ retval = 0;
+ for (i = 0; i < hp->key_size; ++i)
+ retval = (127 * retval + (unsigned char)hp->key[i]) %
+ cache_entries_size;
+
+ return retval;
+}
+
+HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data,
+ ht_item_hash_func, ht_items_cmp_func);
+
+/*
+ * Routines to sort and search the entries by name
+ */
+static int
+entries_bsearch_cmp_func(const void *key, const void *ent)
+{
+
+ assert(key != NULL);
+ assert(ent != NULL);
+
+ return (strcmp((char const *)key,
+ (*(struct cache_entry_ const **)ent)->name));
+}
+
+static int
+entries_qsort_cmp_func(const void *e1, const void *e2)
+{
+
+ assert(e1 != NULL);
+ assert(e2 != NULL);
+
+ return (strcmp((*(struct cache_entry_ const **)e1)->name,
+ (*(struct cache_entry_ const **)e2)->name));
+}
+
+static struct cache_entry_ **
+find_cache_entry_p(struct cache_ *the_cache, const char *entry_name)
+{
+
+ return ((struct cache_entry_ **)(bsearch(entry_name, the_cache->entries,
+ the_cache->entries_size, sizeof(struct cache_entry_ *),
+ entries_bsearch_cmp_func)));
+}
+
+static void
+destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws)
+{
+
+ struct cache_mp_data_item_ *data_item;
+
+ TRACE_IN(destroy_cache_mp_write_session);
+ assert(ws != NULL);
+ while (!TAILQ_EMPTY(&ws->items)) {
+ data_item = TAILQ_FIRST(&ws->items);
+ TAILQ_REMOVE(&ws->items, data_item, entries);
+ free(data_item->value);
+ free(data_item);
+ }
+
+ free(ws);
+ TRACE_OUT(destroy_cache_mp_write_session);
+}
+
/*
 * Frees a multipart read session.  Read sessions own no dynamically
 * allocated members, so the structure itself is all there is to release.
 */
static void
destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs)
{

	TRACE_IN(destroy_cache_mp_read_session);
	assert(rs != NULL);

	free(rs);
	TRACE_OUT(destroy_cache_mp_read_session);
}
+
/*
 * Completely destroys a cache entry of either kind.  For common entries
 * this releases every key/value pair, the hash table, and the eviction
 * policies; for multipart entries it destroys all open read/write
 * sessions plus the completed and pending ones.
 */
static void
destroy_cache_entry(struct cache_entry_ *entry)
{
	struct cache_common_entry_ *common_entry;
	struct cache_mp_entry_ *mp_entry;
	struct cache_mp_read_session_ *rs;
	struct cache_mp_write_session_ *ws;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;

	TRACE_IN(destroy_cache_entry);
	assert(entry != NULL);

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		/* free the stored keys/values before tearing the table down */
		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		HASHTABLE_DESTROY(&(common_entry->items), data);

		/* FIFO policy is always first */
		destroy_cache_fifo_policy(common_entry->policies[0]);
		switch (common_entry->common_params.policy) {
		case CPT_LRU:
			destroy_cache_lru_policy(common_entry->policies[1]);
			break;
		case CPT_LFU:
			destroy_cache_lfu_policy(common_entry->policies[1]);
			break;
		default:
			break;
		}
		free(common_entry->policies);
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		/* drain and destroy all in-flight write sessions */
		while (!TAILQ_EMPTY(&mp_entry->ws_head)) {
			ws = TAILQ_FIRST(&mp_entry->ws_head);
			TAILQ_REMOVE(&mp_entry->ws_head, ws, entries);
			destroy_cache_mp_write_session(ws);
		}

		/* drain and destroy all in-flight read sessions */
		while (!TAILQ_EMPTY(&mp_entry->rs_head)) {
			rs = TAILQ_FIRST(&mp_entry->rs_head);
			TAILQ_REMOVE(&mp_entry->rs_head, rs, entries);
			destroy_cache_mp_read_session(rs);
		}

		if (mp_entry->completed_write_session != NULL)
			destroy_cache_mp_write_session(
				mp_entry->completed_write_session);

		if (mp_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
				mp_entry->pending_write_session);
	}

	/* entry->name aliases the params' entry_name allocation */
	free(entry->name);
	free(entry);
	TRACE_OUT(destroy_cache_entry);
}
+
/*
 * Empties a cache entry without destroying it.  Common entries lose all
 * stored items and all policy bookkeeping; multipart entries drop their
 * completed write session and reset their timestamps, but only when no
 * read session is currently using the data.
 */
static void
clear_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data;
	struct cache_policy_ *policy;
	struct cache_policy_item_ *item, *next_item;
	size_t entry_size;
	int i;

	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;

		/* free the stored pairs, counting how many are removed */
		entry_size = 0;
		HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
			HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data)
			{
				free(ht_item_data->key);
				free(ht_item_data->value);
			}
			entry_size += HASHTABLE_ENTRY_SIZE(ht_item, data);
			HASHTABLE_ENTRY_CLEAR(ht_item, data);
		}

		common_entry->items_size -= entry_size;
		/* drop every policy's item list to match the now-empty table */
		for (i = 0; i < common_entry->policies_size; ++i) {
			policy = common_entry->policies[i];

			next_item = NULL;
			item = policy->get_first_item_func(policy);
			while (item != NULL) {
				/* grab the successor before removal
				 * invalidates the current item */
				next_item = policy->get_next_item_func(policy,
					item);
				policy->remove_item_func(policy, item);
				policy->destroy_item_func(item);
				item = next_item;
			}
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;

		/* only safe to clear when nobody is reading the data */
		if (mp_entry->rs_size == 0) {
			if (mp_entry->completed_write_session != NULL) {
				destroy_cache_mp_write_session(
					mp_entry->completed_write_session);
				mp_entry->completed_write_session = NULL;
			}

			memset(&mp_entry->creation_time, 0,
				sizeof(struct timeval));
			memset(&mp_entry->last_request_time, 0,
				sizeof(struct timeval));
		}
	}
}
+
+/*
+ * When passed to the flush_cache_policy, ensures that all old elements are
+ * deleted.
+ */
+static int
+cache_lifetime_common_continue_func(struct cache_common_entry_ *entry,
+ struct cache_policy_item_ *item)
+{
+
+ return ((item->last_request_time.tv_sec - item->creation_time.tv_sec >
+ entry->common_params.max_lifetime.tv_sec) ? 1: 0);
+}
+
+/*
+ * When passed to the flush_cache_policy, ensures that all elements, that
+ * exceed the size limit, are deleted.
+ */
+static int
+cache_elemsize_common_continue_func(struct cache_common_entry_ *entry,
+ struct cache_policy_item_ *item)
+{
+
+ return ((entry->items_size > entry->common_params.satisf_elemsize) ? 1
+ : 0);
+}
+
+/*
+ * Removes the elements from the cache entry, while the continue_func returns 1.
+ */
/*
 * Removes items from the cache entry, walking the given policy in order,
 * for as long as continue_func returns 1.  Each removal also deletes the
 * matching hash-table pair and, if present, the item's twin in the
 * connected (secondary) policy so both policies stay in sync.
 */
static void
flush_cache_policy(struct cache_common_entry_ *entry,
	struct cache_policy_ *policy,
	struct cache_policy_ *connected_policy,
	int (*continue_func)(struct cache_common_entry_ *,
	struct cache_policy_item_ *))
{
	struct cache_policy_item_ *item, *next_item, *connected_item;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;
	hashtable_index_t hash;

	assert(policy != NULL);

	next_item = NULL;
	item = policy->get_first_item_func(policy);
	while ((item != NULL) && (continue_func(entry, item) == 1)) {
		/* fetch the successor before the current item is removed */
		next_item = policy->get_next_item_func(policy, item);

		connected_item = item->connected_item;
		policy->remove_item_func(policy, item);

		/* build a throwaway key to locate the hash-table pair */
		memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
		ht_key.key = item->key;
		ht_key.key_size = item->key_size;

		hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &entry->items,
			&ht_key);
		assert(hash >= 0);
		assert(hash < HASHTABLE_ENTRIES_COUNT(&entry->items));

		ht_item = HASHTABLE_GET_ENTRY(&(entry->items), hash);
		ht_item_data = HASHTABLE_ENTRY_FIND(cache_ht_, ht_item,
			&ht_key);
		/* the policy item and the table pair must exist together */
		assert(ht_item_data != NULL);
		free(ht_item_data->key);
		free(ht_item_data->value);
		HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data);
		--entry->items_size;

		policy->destroy_item_func(item);

		/* drop the twin item from the secondary policy, if any */
		if (connected_item != NULL) {
			connected_policy->remove_item_func(connected_policy,
				connected_item);
			connected_policy->destroy_item_func(connected_item);
		}

		item = next_item;
	}
}
+
+static void
+flush_cache_entry(struct cache_entry_ *entry)
+{
+ struct cache_mp_entry_ *mp_entry;
+ struct cache_common_entry_ *common_entry;
+ struct cache_policy_ *policy, *connected_policy;
+
+ connected_policy = NULL;
+ if (entry->params->entry_type == CET_COMMON) {
+ common_entry = (struct cache_common_entry_ *)entry;
+ if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
+ (common_entry->common_params.max_lifetime.tv_usec != 0)) {
+
+ policy = common_entry->policies[0];
+ if (common_entry->policies_size > 1)
+ connected_policy = common_entry->policies[1];
+
+ flush_cache_policy(common_entry, policy,
+ connected_policy,
+ cache_lifetime_common_continue_func);
+ }
+
+
+ if ((common_entry->common_params.max_elemsize != 0) &&
+ common_entry->items_size >
+ common_entry->common_params.max_elemsize) {
+
+ if (common_entry->policies_size > 1) {
+ policy = common_entry->policies[1];
+ connected_policy = common_entry->policies[0];
+ } else {
+ policy = common_entry->policies[0];
+ connected_policy = NULL;
+ }
+
+ flush_cache_policy(common_entry, policy,
+ connected_policy,
+ cache_elemsize_common_continue_func);
+ }
+ } else {
+ mp_entry = (struct cache_mp_entry_ *)entry;
+
+ if ((mp_entry->mp_params.max_lifetime.tv_sec != 0)
+ || (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
+
+ if (mp_entry->last_request_time.tv_sec -
+ mp_entry->last_request_time.tv_sec >
+ mp_entry->mp_params.max_lifetime.tv_sec)
+ clear_cache_entry(entry);
+ }
+ }
+}
+
+struct cache_ *
+init_cache(struct cache_params const *params)
+{
+ struct cache_ *retval;
+
+ TRACE_IN(init_cache);
+ assert(params != NULL);
+
+ retval = (struct cache_ *)malloc(sizeof(struct cache_));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct cache_));
+
+ assert(params != NULL);
+ memcpy(&retval->params, params, sizeof(struct cache_params));
+
+ retval->entries = (struct cache_entry_ **)malloc(
+ sizeof(struct cache_entry_ *) * INITIAL_ENTRIES_CAPACITY);
+ assert(retval->entries != NULL);
+ memset(retval->entries, 0, sizeof(sizeof(struct cache_entry_ *)
+ * INITIAL_ENTRIES_CAPACITY));
+
+ retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
+ retval->entries_size = 0;
+
+ TRACE_OUT(init_cache);
+ return (retval);
+}
+
+void
+destroy_cache(struct cache_ *the_cache)
+{
+
+ TRACE_IN(destroy_cache);
+ assert(the_cache != NULL);
+
+ if (the_cache->entries != NULL) {
+ size_t i;
+ for (i = 0; i < the_cache->entries_size; ++i)
+ destroy_cache_entry(the_cache->entries[i]);
+
+ free(the_cache->entries);
+ }
+
+ free(the_cache);
+ TRACE_OUT(destroy_cache);
+}
+
/*
 * Registers a new cache entry described by params.  Returns 0 on success
 * and -1 if an entry with the same name already exists.  The entries
 * array is grown on demand and kept sorted by name so that
 * find_cache_entry_p() can bsearch it.
 */
int
register_cache_entry(struct cache_ *the_cache,
	struct cache_entry_params const *params)
{
	int policies_size;
	size_t entry_name_size;
	struct cache_common_entry_ *new_common_entry;
	struct cache_mp_entry_ *new_mp_entry;

	TRACE_IN(register_cache_entry);
	assert(the_cache != NULL);

	/* entry names must be unique */
	if (find_cache_entry(the_cache, params->entry_name) != NULL) {
		TRACE_OUT(register_cache_entry);
		return (-1);
	}

	/* grow the entries array by a fixed step when it is full */
	if (the_cache->entries_size == the_cache->entries_capacity) {
		struct cache_entry_ **new_entries;
		size_t new_capacity;

		new_capacity = the_cache->entries_capacity +
			ENTRIES_CAPACITY_STEP;
		new_entries = (struct cache_entry_ **)malloc(
			sizeof(struct cache_entry_ *) * new_capacity);
		assert(new_entries != NULL);

		memset(new_entries, 0, sizeof(struct cache_entry_ *) *
			new_capacity);
		memcpy(new_entries, the_cache->entries,
			sizeof(struct cache_entry_ *)
			* the_cache->entries_size);

		free(the_cache->entries);
		the_cache->entries = new_entries;
	}

	entry_name_size = strlen(params->entry_name);
	switch (params->entry_type)
	{
	case CET_COMMON:
		new_common_entry = (struct cache_common_entry_ *)malloc(
			sizeof(struct cache_common_entry_));
		assert(new_common_entry != NULL);
		memset(new_common_entry, 0, sizeof(struct cache_common_entry_));

		/* params actually points to common_cache_entry_params here */
		memcpy(&new_common_entry->common_params, params,
			sizeof(struct common_cache_entry_params));
		new_common_entry->params =
		  (struct cache_entry_params *)&new_common_entry->common_params;

		/* the entry owns a private copy of its name */
		new_common_entry->common_params.entry_name = (char *)malloc(
			entry_name_size+1);
		assert(new_common_entry->common_params.entry_name != NULL);
		memset(new_common_entry->common_params.entry_name, 0,
			entry_name_size + 1);
		strncpy(new_common_entry->common_params.entry_name,
			params->entry_name, entry_name_size);
		new_common_entry->name =
			new_common_entry->common_params.entry_name;

		HASHTABLE_INIT(&(new_common_entry->items),
			struct cache_ht_item_data_, data,
			new_common_entry->common_params.cache_entries_size);

		/* FIFO is always present; LRU/LFU become a second policy */
		if (new_common_entry->common_params.policy == CPT_FIFO)
			policies_size = 1;
		else
			policies_size = 2;

		new_common_entry->policies = (struct cache_policy_ **)malloc(
			sizeof(struct cache_policy_ *) * policies_size);
		assert(new_common_entry->policies != NULL);
		memset(new_common_entry->policies, 0,
			sizeof(struct cache_policy_ *) * policies_size);

		new_common_entry->policies_size = policies_size;
		new_common_entry->policies[0] = init_cache_fifo_policy();

		if (policies_size > 1) {
			switch (new_common_entry->common_params.policy) {
			case CPT_LRU:
				new_common_entry->policies[1] =
					init_cache_lru_policy();
				break;
			case CPT_LFU:
				new_common_entry->policies[1] =
					init_cache_lfu_policy();
				break;
			default:
				break;
			}
		}

		new_common_entry->get_time_func =
			the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
			(struct cache_entry_ *)new_common_entry;
		break;
	case CET_MULTIPART:
		new_mp_entry = (struct cache_mp_entry_ *)malloc(
			sizeof(struct cache_mp_entry_));
		assert(new_mp_entry != NULL);
		memset(new_mp_entry, 0, sizeof(struct cache_mp_entry_));

		/* params actually points to mp_cache_entry_params here */
		memcpy(&new_mp_entry->mp_params, params,
			sizeof(struct mp_cache_entry_params));
		new_mp_entry->params =
			(struct cache_entry_params *)&new_mp_entry->mp_params;

		/* the entry owns a private copy of its name */
		new_mp_entry->mp_params.entry_name = (char *)malloc(
			entry_name_size+1);
		assert(new_mp_entry->mp_params.entry_name != NULL);
		memset(new_mp_entry->mp_params.entry_name, 0,
			entry_name_size + 1);
		strncpy(new_mp_entry->mp_params.entry_name, params->entry_name,
			entry_name_size);
		new_mp_entry->name = new_mp_entry->mp_params.entry_name;

		TAILQ_INIT(&new_mp_entry->ws_head);
		TAILQ_INIT(&new_mp_entry->rs_head);

		new_mp_entry->get_time_func = the_cache->params.get_time_func;
		the_cache->entries[the_cache->entries_size++] =
			(struct cache_entry_ *)new_mp_entry;
		break;
	}

	/* restore the name ordering for bsearch-based lookups */
	qsort(the_cache->entries, the_cache->entries_size,
		sizeof(struct cache_entry_ *), entries_qsort_cmp_func);

	TRACE_OUT(register_cache_entry);
	return (0);
}
+
+int
+unregister_cache_entry(struct cache_ *the_cache, const char *entry_name)
+{
+ struct cache_entry_ **del_ent;
+
+ TRACE_IN(unregister_cache_entry);
+ assert(the_cache != NULL);
+
+ del_ent = find_cache_entry_p(the_cache, entry_name);
+ if (del_ent != NULL) {
+ destroy_cache_entry(*del_ent);
+ --the_cache->entries_size;
+
+ memmove(del_ent, del_ent + 1,
+ (&(the_cache->entries[--the_cache->entries_size]) -
+ del_ent) * sizeof(struct cache_entry_ *));
+
+ TRACE_OUT(unregister_cache_entry);
+ return (0);
+ } else {
+ TRACE_OUT(unregister_cache_entry);
+ return (-1);
+ }
+}
+
/*
 * Looks up a registered cache entry by name.  Returns the entry, or
 * NULL when no entry with that name exists.
 */
struct cache_entry_ *
find_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **slot;

	TRACE_IN(find_cache_entry);
	slot = find_cache_entry_p(the_cache, entry_name);
	TRACE_OUT(find_cache_entry);

	return (slot == NULL ? NULL : *slot);
}
+
+/*
+ * Tries to read the element with the specified key from the cache. If the
+ * value_size is too small, it will be filled with the proper number, and
+ * the user will need to call cache_read again with the value buffer, that
+ * is large enough.
+ * Function returns 0 on success, -1 on error, and -2 if the value_size is too
+ * small.
+ */
+int
+cache_read(struct cache_entry_ *entry, const char *key, size_t key_size,
+ char *value, size_t *value_size)
+{
+ struct cache_common_entry_ *common_entry;
+ struct cache_ht_item_data_ item_data, *find_res;
+ struct cache_ht_item_ *item;
+ hashtable_index_t hash;
+ struct cache_policy_item_ *connected_item;
+
+ TRACE_IN(cache_read);
+ assert(entry != NULL);
+ assert(key != NULL);
+ assert(value_size != NULL);
+ assert(entry->params->entry_type == CET_COMMON);
+
+ common_entry = (struct cache_common_entry_ *)entry;
+
+ memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
+ /* can't avoid the cast here */
+ item_data.key = (char *)key;
+ item_data.key_size = key_size;
+
+ hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
+ &item_data);
+ assert(hash >= 0);
+ assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));
+
+ item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
+ find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
+ if (find_res == NULL) {
+ TRACE_OUT(cache_read);
+ return (-1);
+ }
+
+ if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
+ (common_entry->common_params.max_lifetime.tv_usec != 0)) {
+
+ if (find_res->fifo_policy_item->last_request_time.tv_sec -
+ find_res->fifo_policy_item->creation_time.tv_sec >
+ common_entry->common_params.max_lifetime.tv_sec) {
+
+ free(find_res->key);
+ free(find_res->value);
+
+ connected_item =
+ find_res->fifo_policy_item->connected_item;
+ if (connected_item != NULL) {
+ common_entry->policies[1]->remove_item_func(
+ common_entry->policies[1],
+ connected_item);
+ common_entry->policies[1]->destroy_item_func(
+ connected_item);
+ }
+
+ common_entry->policies[0]->remove_item_func(
+ common_entry->policies[0],
+ find_res->fifo_policy_item);
+ common_entry->policies[0]->destroy_item_func(
+ find_res->fifo_policy_item);
+
+ HASHTABLE_ENTRY_REMOVE(cache_ht_, item, find_res);
+ --common_entry->items_size;
+ }
+ }
+
+ if ((*value_size < find_res->value_size) || (value == NULL)) {
+ *value_size = find_res->value_size;
+ TRACE_OUT(cache_read);
+ return (-2);
+ }
+
+ *value_size = find_res->value_size;
+ memcpy(value, find_res->value, find_res->value_size);
+
+ ++find_res->fifo_policy_item->request_count;
+ common_entry->get_time_func(
+ &find_res->fifo_policy_item->last_request_time);
+ common_entry->policies[0]->update_item_func(common_entry->policies[0],
+ find_res->fifo_policy_item);
+
+ if (find_res->fifo_policy_item->connected_item != NULL) {
+ connected_item = find_res->fifo_policy_item->connected_item;
+ memcpy(&connected_item->last_request_time,
+ &find_res->fifo_policy_item->last_request_time,
+ sizeof(struct timeval));
+ connected_item->request_count =
+ find_res->fifo_policy_item->request_count;
+
+ common_entry->policies[1]->update_item_func(
+ common_entry->policies[1], connected_item);
+ }
+
+ TRACE_OUT(cache_read);
+ return (0);
+}
+
+/*
+ * Writes the value with the specified key into the cache entry.
+ * Functions returns 0 on success, and -1 on error.
+ */
+int
+cache_write(struct cache_entry_ *entry, const char *key, size_t key_size,
+	char const *value, size_t value_size)
+{
+	struct cache_common_entry_ *common_entry;
+	struct cache_ht_item_data_ item_data, *find_res;
+	struct cache_ht_item_ *item;
+	hashtable_index_t hash;
+
+	struct cache_policy_ *policy, *connected_policy;
+	struct cache_policy_item_ *policy_item;
+	struct cache_policy_item_ *connected_policy_item;
+
+	TRACE_IN(cache_write);
+	assert(entry != NULL);
+	assert(key != NULL);
+	assert(value != NULL);
+	assert(entry->params->entry_type == CET_COMMON);
+
+	common_entry = (struct cache_common_entry_ *)entry;
+
+	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
+	/* can't avoid the cast here */
+	item_data.key = (char *)key;
+	item_data.key_size = key_size;
+
+	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
+		&item_data);
+	assert(hash >= 0);
+	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));
+
+	/* a duplicate key is an error - the item must not exist yet */
+	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
+	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
+	if (find_res != NULL) {
+		TRACE_OUT(cache_write);
+		return (-1);
+	}
+
+	/* the hashtable item owns private copies of both key and value */
+	item_data.key = (char *)malloc(key_size);
+	assert(item_data.key != NULL);
+	memcpy(item_data.key, key, key_size);
+
+	item_data.value = (char *)malloc(value_size);
+	assert(item_data.value != NULL);
+
+	memcpy(item_data.value, value, value_size);
+	item_data.value_size = value_size;
+
+	/*
+	 * Register the new item with the mandatory FIFO policy and, when
+	 * configured, with the user-chosen secondary policy.  The two policy
+	 * items are cross-linked through connected_item.
+	 */
+	policy_item = common_entry->policies[0]->create_item_func();
+	policy_item->key = item_data.key;
+	policy_item->key_size = item_data.key_size;
+	common_entry->get_time_func(&policy_item->creation_time);
+
+	if (common_entry->policies_size > 1) {
+		connected_policy_item =
+			common_entry->policies[1]->create_item_func();
+		memcpy(&connected_policy_item->creation_time,
+			&policy_item->creation_time,
+			sizeof(struct timeval));
+		connected_policy_item->key = policy_item->key;
+		connected_policy_item->key_size = policy_item->key_size;
+
+		connected_policy_item->connected_item = policy_item;
+		policy_item->connected_item = connected_policy_item;
+	}
+
+	item_data.fifo_policy_item = policy_item;
+
+	common_entry->policies[0]->add_item_func(common_entry->policies[0],
+		policy_item);
+	if (common_entry->policies_size > 1)
+		common_entry->policies[1]->add_item_func(
+			common_entry->policies[1], connected_policy_item);
+
+	HASHTABLE_ENTRY_STORE(cache_ht_, item, &item_data);
+	++common_entry->items_size;
+
+	/* when the entry has grown too large, evict via the user policy */
+	if ((common_entry->common_params.max_elemsize != 0) &&
+		(common_entry->items_size >
+		common_entry->common_params.max_elemsize)) {
+		if (common_entry->policies_size > 1) {
+			policy = common_entry->policies[1];
+			connected_policy = common_entry->policies[0];
+		} else {
+			policy = common_entry->policies[0];
+			connected_policy = NULL;
+		}
+
+		flush_cache_policy(common_entry, policy, connected_policy,
+			cache_elemsize_common_continue_func);
+	}
+
+	TRACE_OUT(cache_write);
+	return (0);
+}
+
+/*
+ * Initializes the write session for the specified multipart entry. This
+ * session then should be filled with data either committed or abandoned by
+ * using close_cache_mp_write_session or abandon_cache_mp_write_session
+ * respectively.
+ * Returns NULL on errors (when there are too many opened write sessions for
+ * the entry).
+ */
+struct cache_mp_write_session_ *
+open_cache_mp_write_session(struct cache_entry_ *entry)
+{
+	struct cache_mp_entry_ *mp_entry;
+	struct cache_mp_write_session_ *retval;
+
+	TRACE_IN(open_cache_mp_write_session);
+	assert(entry != NULL);
+	assert(entry->params->entry_type == CET_MULTIPART);
+	mp_entry = (struct cache_mp_entry_ *)entry;
+
+	/* enforce the per-entry limit on simultaneously open write sessions */
+	if ((mp_entry->mp_params.max_sessions > 0) &&
+		(mp_entry->ws_size == mp_entry->mp_params.max_sessions)) {
+		TRACE_OUT(open_cache_mp_write_session);
+		return (NULL);
+	}
+
+	retval = (struct cache_mp_write_session_ *)malloc(
+		sizeof(struct cache_mp_write_session_));
+	assert(retval != NULL);
+	memset(retval, 0, sizeof(struct cache_mp_write_session_));
+
+	TAILQ_INIT(&retval->items);
+	retval->parent_entry = mp_entry;
+
+	/* track the new session in the parent entry's open-session list */
+	TAILQ_INSERT_HEAD(&mp_entry->ws_head, retval, entries);
+	++mp_entry->ws_size;
+
+	TRACE_OUT(open_cache_mp_write_session);
+	return (retval);
+}
+
+/*
+ * Writes data to the specified session. Return 0 on success and -1 on errors
+ * (when write session size limit is exceeded).
+ */
+int
+cache_mp_write(struct cache_mp_write_session_ *ws, char *data,
+	size_t data_size)
+{
+	struct cache_mp_data_item_ *new_item;
+
+	TRACE_IN(cache_mp_write);
+	assert(ws != NULL);
+	assert(ws->parent_entry != NULL);
+	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);
+
+	/* refuse the write once the session holds max_elemsize items */
+	if ((ws->parent_entry->mp_params.max_elemsize > 0) &&
+		(ws->parent_entry->mp_params.max_elemsize == ws->items_size)) {
+		TRACE_OUT(cache_mp_write);
+		return (-1);
+	}
+
+	new_item = (struct cache_mp_data_item_ *)malloc(
+		sizeof(struct cache_mp_data_item_));
+	assert(new_item != NULL);
+	memset(new_item, 0, sizeof(struct cache_mp_data_item_));
+
+	/* the item keeps a private copy of the caller's data */
+	new_item->value = (char *)malloc(data_size);
+	assert(new_item->value != NULL);
+	memcpy(new_item->value, data, data_size);
+	new_item->value_size = data_size;
+
+	TAILQ_INSERT_TAIL(&ws->items, new_item, entries);
+	++ws->items_size;
+
+	TRACE_OUT(cache_mp_write);
+	return (0);
+}
+
+/*
+ * Abandons the write session and frees all the connected resources.
+ */
+void
+abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws)
+{
+
+	TRACE_IN(abandon_cache_mp_write_session);
+	assert(ws != NULL);
+	assert(ws->parent_entry != NULL);
+	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);
+
+	/* detach the session from the parent entry, then free everything */
+	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
+	--ws->parent_entry->ws_size;
+
+	destroy_cache_mp_write_session(ws);
+	TRACE_OUT(abandon_cache_mp_write_session);
+}
+
+/*
+ * Commits the session to the entry, for which it was created.
+ */
+void
+close_cache_mp_write_session(struct cache_mp_write_session_ *ws)
+{
+
+	TRACE_IN(close_cache_mp_write_session);
+	assert(ws != NULL);
+	assert(ws->parent_entry != NULL);
+	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);
+
+	/* the session is no longer "open" - remove it from the list */
+	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
+	--ws->parent_entry->ws_size;
+
+	if (ws->parent_entry->completed_write_session == NULL) {
+		/*
+		 * If there is no completed session yet, this will be the one
+		 */
+		ws->parent_entry->get_time_func(
+			&ws->parent_entry->creation_time);
+		ws->parent_entry->completed_write_session = ws;
+	} else {
+		/*
+		 * If there is a completed session, then we'll save our session
+		 * as a pending session. If there is already a pending session,
+		 * it would be destroyed.
+		 */
+		if (ws->parent_entry->pending_write_session != NULL)
+			destroy_cache_mp_write_session(
+				ws->parent_entry->pending_write_session);
+
+		ws->parent_entry->pending_write_session = ws;
+	}
+	TRACE_OUT(close_cache_mp_write_session);
+}
+
+/*
+ * Opens read session for the specified entry. Returns NULL on errors (when
+ * there are no data in the entry, or the data are obsolete).
+ */
+struct cache_mp_read_session_ *
+open_cache_mp_read_session(struct cache_entry_ *entry)
+{
+	struct cache_mp_entry_ *mp_entry;
+	struct cache_mp_read_session_ *retval;
+
+	TRACE_IN(open_cache_mp_read_session);
+	assert(entry != NULL);
+	assert(entry->params->entry_type == CET_MULTIPART);
+	mp_entry = (struct cache_mp_entry_ *)entry;
+
+	/* nothing to read until a write session has been committed */
+	if (mp_entry->completed_write_session == NULL) {
+		TRACE_OUT(open_cache_mp_read_session);
+		return (NULL);
+	}
+
+	if ((mp_entry->mp_params.max_lifetime.tv_sec != 0)
+		|| (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
+		/*
+		 * Age is measured from the commit time of the completed
+		 * write session (creation_time), mirroring the lifetime
+		 * check in cache_read().  The previous code subtracted
+		 * last_request_time from itself - always 0 - so obsolete
+		 * data were never flushed.
+		 */
+		if (mp_entry->last_request_time.tv_sec -
+			mp_entry->creation_time.tv_sec >
+			mp_entry->mp_params.max_lifetime.tv_sec) {
+			flush_cache_entry(entry);
+			TRACE_OUT(open_cache_mp_read_session);
+			return (NULL);
+		}
+	}
+
+	retval = (struct cache_mp_read_session_ *)malloc(
+		sizeof(struct cache_mp_read_session_));
+	assert(retval != NULL);
+	memset(retval, 0, sizeof(struct cache_mp_read_session_));
+
+	/* readers iterate over the committed session's item list */
+	retval->parent_entry = mp_entry;
+	retval->current_item = TAILQ_FIRST(
+		&mp_entry->completed_write_session->items);
+
+	TAILQ_INSERT_HEAD(&mp_entry->rs_head, retval, entries);
+	++mp_entry->rs_size;
+
+	mp_entry->get_time_func(&mp_entry->last_request_time);
+	TRACE_OUT(open_cache_mp_read_session);
+	return (retval);
+}
+
+/*
+ * Reads the data from the read session - step by step.
+ * Returns 0 on success, -1 on error (when there are no more data), and -2 if
+ * the data_size is too small. In the last case, data_size would be filled
+ * the proper value.
+ */
+int
+cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size)
+{
+
+	TRACE_IN(cache_mp_read);
+	assert(rs != NULL);
+
+	/* past the last item - nothing more to read */
+	if (rs->current_item == NULL) {
+		TRACE_OUT(cache_mp_read);
+		return (-1);
+	}
+
+	if (rs->current_item->value_size > *data_size) {
+		*data_size = rs->current_item->value_size;
+		/* data == NULL is a size probe: report the size, no error */
+		if (data == NULL) {
+			TRACE_OUT(cache_mp_read);
+			return (0);
+		}
+
+		TRACE_OUT(cache_mp_read);
+		return (-2);
+	}
+
+	*data_size = rs->current_item->value_size;
+	memcpy(data, rs->current_item->value, rs->current_item->value_size);
+	/* advance so the next call returns the following item */
+	rs->current_item = TAILQ_NEXT(rs->current_item, entries);
+
+	TRACE_OUT(cache_mp_read);
+	return (0);
+}
+
+/*
+ * Closes the read session. If there are no more read sessions and there is
+ * a pending write session, it will be committed and old
+ * completed_write_session will be destroyed.
+ */
+void
+close_cache_mp_read_session(struct cache_mp_read_session_ *rs)
+{
+
+	TRACE_IN(close_cache_mp_read_session);
+	assert(rs != NULL);
+	assert(rs->parent_entry != NULL);
+
+	TAILQ_REMOVE(&rs->parent_entry->rs_head, rs, entries);
+	--rs->parent_entry->rs_size;
+
+	/*
+	 * When the last reader goes away, promote the pending write session
+	 * to be the new completed one and destroy the old data.
+	 */
+	if ((rs->parent_entry->rs_size == 0) &&
+		(rs->parent_entry->pending_write_session != NULL)) {
+		destroy_cache_mp_write_session(
+			rs->parent_entry->completed_write_session);
+		rs->parent_entry->completed_write_session =
+			rs->parent_entry->pending_write_session;
+		rs->parent_entry->pending_write_session = NULL;
+	}
+
+	destroy_cache_mp_read_session(rs);
+	TRACE_OUT(close_cache_mp_read_session);
+}
+
+/*
+ * Applies the requested whole-entry transformation.  Returns 0 when the
+ * transformation is known, -1 otherwise.
+ */
+int
+transform_cache_entry(struct cache_entry_ *entry,
+	enum cache_transformation_t transformation)
+{
+	int result;
+
+	TRACE_IN(transform_cache_entry);
+	switch (transformation) {
+	case CTT_CLEAR:
+		clear_cache_entry(entry);
+		result = 0;
+		break;
+	case CTT_FLUSH:
+		flush_cache_entry(entry);
+		result = 0;
+		break;
+	default:
+		result = -1;
+		break;
+	}
+	TRACE_OUT(transform_cache_entry);
+	return (result);
+}
+
+int
+transform_cache_entry_part(struct cache_entry_ *entry,
+	enum cache_transformation_t transformation, const char *key_part,
+	size_t key_part_size, enum part_position_t part_position)
+{
+	struct cache_common_entry_ *common_entry;
+	struct cache_ht_item_ *ht_item;
+	struct cache_ht_item_data_ *ht_item_data, ht_key;
+
+	struct cache_policy_item_ *item, *connected_item;
+
+	TRACE_IN(transform_cache_entry_part);
+	/* partial transformations are defined for common entries only */
+	if (entry->params->entry_type != CET_COMMON) {
+		TRACE_OUT(transform_cache_entry_part);
+		return (-1);
+	}
+
+	/* only CTT_CLEAR is supported for partial transformations */
+	if (transformation != CTT_CLEAR) {
+		TRACE_OUT(transform_cache_entry_part);
+		return (-1);
+	}
+
+	memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
+	ht_key.key = (char *)key_part; /* can't avoid casting here */
+	ht_key.key_size = key_part_size;
+
+	/*
+	 * NOTE(review): part_position is never examined below - the search
+	 * is hard-wired to ht_items_fixed_size_left_cmp_func, so KPPT_RIGHT
+	 * appears to behave the same as KPPT_LEFT here.  Confirm intent.
+	 */
+	common_entry = (struct cache_common_entry_ *)entry;
+	HASHTABLE_FOREACH(&(common_entry->items), ht_item) {
+		/* repeatedly delete matches in this bucket until none left */
+		do {
+			ht_item_data = HASHTABLE_ENTRY_FIND_SPECIAL(cache_ht_,
+				ht_item, &ht_key,
+				ht_items_fixed_size_left_cmp_func);
+
+			if (ht_item_data != NULL) {
+				item = ht_item_data->fifo_policy_item;
+				connected_item = item->connected_item;
+
+				common_entry->policies[0]->remove_item_func(
+					common_entry->policies[0],
+					item);
+
+				/* the hashtable item owns key and value */
+				free(ht_item_data->key);
+				free(ht_item_data->value);
+				HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item,
+					ht_item_data);
+				--common_entry->items_size;
+
+				common_entry->policies[0]->destroy_item_func(
+					item);
+				if (common_entry->policies_size == 2) {
+					common_entry->policies[1]->remove_item_func(
+						common_entry->policies[1],
+						connected_item);
+					common_entry->policies[1]->destroy_item_func(
+						connected_item);
+				}
+			}
+		} while (ht_item_data != NULL);
+	}
+
+	TRACE_OUT(transform_cache_entry_part);
+	return (0);
+}
diff --git a/usr.sbin/nscd/cachelib.h b/usr.sbin/nscd/cachelib.h
new file mode 100644
index 0000000..d67e830
--- /dev/null
+++ b/usr.sbin/nscd/cachelib.h
@@ -0,0 +1,281 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_CACHELIB_H__
+#define __CACHED_CACHELIB_H__
+
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include "hashtable.h"
+#include "cacheplcs.h"
+
+/* how the data inside a cache entry is organized */
+enum cache_entry_t {
+	CET_COMMON = 0,	/* cache item is atomic */
+	CET_MULTIPART	/* cache item is formed part by part */
+};
+
+/* transformations that can be applied to a cache entry */
+enum cache_transformation_t {
+	CTT_FLUSH = 0,	/* flush the cache - delete all obsolete items */
+	CTT_CLEAR = 1	/* delete all items in the cache */
+};
+
+/* cache deletion policy type enum */
+enum cache_policy_t {
+	CPT_FIFO = 0,	/* first-in first-out */
+	CPT_LRU = 1,	/* least recently used */
+	CPT_LFU = 2	/* least frequently used */
+};
+
+/* multipart sessions can be used for reading and writing */
+enum cache_mp_session_t {
+	CMPT_READ_SESSION,
+	CMPT_WRITE_SESSION
+};
+
+/*
+ * When doing partial transformations of entries (which are applied for
+ * elements with keys, that contain specified buffer in its left or
+ * right part), this enum will show the needed position of the key part.
+ */
+enum part_position_t {
+	KPPT_LEFT,
+	KPPT_RIGHT
+};
+
+/*
+ * The num_levels attribute is obsolete - a user can always emulate it by
+ * using one entry.  get_time_func is needed to have a clock-independent
+ * time source.
+ */
+struct cache_params
+{
+	void	(*get_time_func)(struct timeval *);
+};
+
+/*
+ * base structure - normal_cache_entry_params and multipart_cache_entry_params
+ * are "inherited" from it
+ */
+struct cache_entry_params
+{
+	enum cache_entry_t entry_type;
+	char	*entry_name;
+};
+
+/* params, used for most entries */
+struct common_cache_entry_params
+{
+	/* inherited fields */
+	enum cache_entry_t entry_type;
+
+	/* unique fields */
+	char	*entry_name;
+	size_t	cache_entries_size;	/* number of hashtable buckets */
+
+	size_t	max_elemsize;		/* if 0 then no check is made */
+	size_t	satisf_elemsize;	/* if entry size is exceeded,
+					 * this number of elements will be left,
+					 * others will be deleted */
+	struct timeval	max_lifetime;	/* if 0 then no check is made */
+	enum cache_policy_t policy;	/* policy used for transformations */
+};
+
+/* params, used for multipart entries */
+struct mp_cache_entry_params
+{
+	/* inherited fields */
+	enum cache_entry_t entry_type;
+	char	*entry_name;
+
+	/* unique fields */
+	size_t	max_elemsize;	/* if 0 then no check is made */
+	size_t	max_sessions;	/* maximum number of active sessions */
+
+	struct timeval	max_lifetime;	/* maximum elements lifetime */
+};
+
+/* one key/value pair stored in a common entry's hashtable */
+struct cache_ht_item_data_
+{
+	/* key is the bytes sequence only - not the null-terminated string */
+	char	*key;
+	size_t	key_size;
+
+	char	*value;
+	size_t	value_size;
+
+	/* link to the item's record in the mandatory FIFO policy */
+	struct cache_policy_item_ *fifo_policy_item;
+};
+
+/* a single hashtable bucket (see hashtable.h) */
+struct cache_ht_item_
+{
+	HASHTABLE_ENTRY_HEAD(ht_item_, struct cache_ht_item_data_) data;
+};
+
+/*
+ * Base entry header.  cache_common_entry_ and cache_mp_entry_ begin with
+ * these same fields, so a cache_entry_ pointer is cast to the concrete
+ * type after checking params->entry_type.
+ */
+struct cache_entry_
+{
+	char	*name;
+	struct cache_entry_params *params;
+};
+
+struct cache_common_entry_
+{
+	char	*name;
+	struct cache_entry_params *params;
+
+	struct common_cache_entry_params common_params;
+
+	HASHTABLE_HEAD(cache_ht_, cache_ht_item_) items;
+	size_t items_size;
+
+	/*
+	 * Entry always has the FIFO policy, that is used to eliminate old
+	 * elements (the ones, with lifetime more than max_lifetime). Besides,
+	 * user can specify another policy to be applied, when there are too
+	 * many elements in the entry. So policies_size can be 1 or 2.
+	 */
+	struct cache_policy_ **policies;
+	size_t policies_size;
+
+	void	(*get_time_func)(struct timeval *);
+};
+
+/* one data chunk of a multipart session */
+struct cache_mp_data_item_ {
+	char	*value;
+	size_t	value_size;
+
+	TAILQ_ENTRY(cache_mp_data_item_) entries;
+};
+
+struct cache_mp_write_session_
+{
+	struct cache_mp_entry_	*parent_entry;
+
+	/*
+	 * All items are accumulated in this queue. When the session is
+	 * committed, they all will be copied to the multipart entry.
+	 */
+	TAILQ_HEAD(cache_mp_data_item_head, cache_mp_data_item_) items;
+	size_t items_size;
+
+	TAILQ_ENTRY(cache_mp_write_session_) entries;
+};
+
+/* a read cursor over the entry's completed write session */
+struct cache_mp_read_session_
+{
+	struct cache_mp_entry_ *parent_entry;
+	struct cache_mp_data_item_ *current_item;
+
+	TAILQ_ENTRY(cache_mp_read_session_) entries;
+};
+
+struct cache_mp_entry_
+{
+	char	*name;
+	struct cache_entry_params *params;
+
+	struct mp_cache_entry_params mp_params;
+
+	/* All opened write sessions */
+	TAILQ_HEAD(write_sessions_head, cache_mp_write_session_) ws_head;
+	size_t	ws_size;
+
+	/* All opened read sessions */
+	TAILQ_HEAD(read_sessions_head, cache_mp_read_session_) rs_head;
+	size_t	rs_size;
+
+	/*
+	 * completed_write_session is the committed write sessions. All read
+	 * sessions use data from it. If the completed_write_session is out of
+	 * date, but still in use by some of the read sessions, the newly
+	 * committed write session is stored in the pending_write_session.
+	 * In such a case, completed_write_session will be substituted with
+	 * pending_write_session as soon as it won't be used by any of
+	 * the read sessions.
+	 */
+	struct cache_mp_write_session_ *completed_write_session;
+	struct cache_mp_write_session_ *pending_write_session;
+	struct timeval creation_time;
+	struct timeval last_request_time;
+
+	void	(*get_time_func)(struct timeval *);
+};
+
+/* the cache itself - a growable array of entry pointers */
+struct cache_
+{
+	struct cache_params params;
+
+	struct cache_entry_ **entries;
+	size_t entries_capacity;
+	size_t entries_size;
+};
+
+/* pointer typedefs - so that "struct" need not be written every time */
+typedef struct cache_ *cache;
+typedef struct cache_entry_ *cache_entry;
+typedef struct cache_mp_write_session_ *cache_mp_write_session;
+typedef struct cache_mp_read_session_ *cache_mp_read_session;
+
+/* sentinel values returned by the routines below on failure */
+#define INVALID_CACHE (NULL)
+#define INVALID_CACHE_ENTRY (NULL)
+#define INVALID_CACHE_MP_WRITE_SESSION (NULL)
+#define INVALID_CACHE_MP_READ_SESSION (NULL)
+
+/*
+ * NOTE: all cache operations are thread-unsafe. You must ensure thread-safety
+ * externally, by yourself.
+ */
+
+/* cache initialization/destruction routines */
+extern cache init_cache(struct cache_params const *);
+extern void destroy_cache(cache);
+
+/* cache entries manipulation routines */
+extern int register_cache_entry(cache, struct cache_entry_params const *);
+extern int unregister_cache_entry(cache, const char *);
+extern cache_entry find_cache_entry(cache, const char *);
+
+/* read/write operations used on common entries */
+extern int cache_read(cache_entry, const char *, size_t, char *, size_t *);
+extern int cache_write(cache_entry, const char *, size_t, char const *, size_t);
+
+/* read/write operations used on multipart entries */
+extern cache_mp_write_session open_cache_mp_write_session(cache_entry);
+extern int cache_mp_write(cache_mp_write_session, char *, size_t);
+extern void abandon_cache_mp_write_session(cache_mp_write_session);
+extern void close_cache_mp_write_session(cache_mp_write_session);
+
+extern cache_mp_read_session open_cache_mp_read_session(cache_entry);
+extern int cache_mp_read(cache_mp_read_session, char *, size_t *);
+extern void close_cache_mp_read_session(cache_mp_read_session);
+
+/* transformation routines */
+extern int transform_cache_entry(cache_entry, enum cache_transformation_t);
+extern int transform_cache_entry_part(cache_entry, enum cache_transformation_t,
+	const char *, size_t, enum part_position_t);
+
+#endif
diff --git a/usr.sbin/nscd/cacheplcs.c b/usr.sbin/nscd/cacheplcs.c
new file mode 100644
index 0000000..a7ee38d
--- /dev/null
+++ b/usr.sbin/nscd/cacheplcs.c
@@ -0,0 +1,590 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <assert.h>
+#include <string.h>
+#include "cacheplcs.h"
+#include "debug.h"
+
+static void cache_fifo_policy_update_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static void cache_lfu_policy_add_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static struct cache_policy_item_ * cache_lfu_policy_create_item(void);
+static void cache_lfu_policy_destroy_item(struct cache_policy_item_ *);
+static struct cache_policy_item_ *cache_lfu_policy_get_first_item(
+ struct cache_policy_ *);
+static struct cache_policy_item_ *cache_lfu_policy_get_last_item(
+ struct cache_policy_ *);
+static struct cache_policy_item_ *cache_lfu_policy_get_next_item(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+static struct cache_policy_item_ *cache_lfu_policy_get_prev_item(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+static void cache_lfu_policy_remove_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static void cache_lfu_policy_update_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static void cache_lru_policy_update_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static void cache_queue_policy_add_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static struct cache_policy_item_ * cache_queue_policy_create_item();
+static void cache_queue_policy_destroy_item(struct cache_policy_item_ *);
+static struct cache_policy_item_ *cache_queue_policy_get_first_item(
+ struct cache_policy_ *);
+static struct cache_policy_item_ *cache_queue_policy_get_last_item(
+ struct cache_policy_ *);
+static struct cache_policy_item_ *cache_queue_policy_get_next_item(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+static struct cache_policy_item_ *cache_queue_policy_get_prev_item(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+static void cache_queue_policy_remove_item(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+static void destroy_cache_queue_policy(struct cache_queue_policy_ *);
+static struct cache_queue_policy_ *init_cache_queue_policy(void);
+
+/*
+ * All cache_queue_policy_XXX functions below will be used to fill
+ * the cache_queue_policy structure. They implement the most functionality of
+ * LRU and FIFO policies. LRU and FIFO policies are actually the
+ * cache_queue_policy_ with cache_update_item function changed.
+ */
+/*
+ * Allocates and zeroes a new queue-policy item.  Declared (void) for a
+ * proper prototype, matching cache_lfu_policy_create_item below.
+ */
+static struct cache_policy_item_ *
+cache_queue_policy_create_item(void)
+{
+	struct cache_queue_policy_item_ *retval;
+
+	TRACE_IN(cache_queue_policy_create_item);
+	retval = (struct cache_queue_policy_item_ *)malloc(
+		sizeof(struct cache_queue_policy_item_));
+	assert(retval != NULL);
+	memset(retval, 0, sizeof(struct cache_queue_policy_item_));
+
+	TRACE_OUT(cache_queue_policy_create_item);
+	return ((struct cache_policy_item_ *)retval);
+}
+
+static void
+cache_queue_policy_destroy_item(struct cache_policy_item_ *item)
+{
+
+	TRACE_IN(cache_queue_policy_destroy_item);
+	assert(item != NULL);
+	/* item->key is owned by the hashtable item - free the item only */
+	free(item);
+	TRACE_OUT(cache_queue_policy_destroy_item);
+}
+
+static void
+cache_queue_policy_add_item(struct cache_policy_ *policy,
+	struct cache_policy_item_ *item)
+{
+	struct cache_queue_policy_ *queue_policy;
+	struct cache_queue_policy_item_ *queue_item;
+
+	TRACE_IN(cache_queue_policy_add_item);
+	queue_policy = (struct cache_queue_policy_ *)policy;
+	queue_item = (struct cache_queue_policy_item_ *)item;
+	/* new items always enter at the tail of the queue */
+	TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
+	TRACE_OUT(cache_queue_policy_add_item);
+}
+
+static void
+cache_queue_policy_remove_item(struct cache_policy_ *policy,
+	struct cache_policy_item_ *item)
+{
+	struct cache_queue_policy_ *queue_policy;
+	struct cache_queue_policy_item_ *queue_item;
+
+	TRACE_IN(cache_queue_policy_remove_item);
+	queue_policy = (struct cache_queue_policy_ *)policy;
+	queue_item = (struct cache_queue_policy_item_ *)item;
+	/* unlink only - the caller destroys the item separately */
+	TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
+	TRACE_OUT(cache_queue_policy_remove_item);
+}
+
+/* Returns the head of the policy queue, or NULL when it is empty. */
+static struct cache_policy_item_ *
+cache_queue_policy_get_first_item(struct cache_policy_ *policy)
+{
+	struct cache_queue_policy_ *qpolicy;
+	struct cache_policy_item_ *first;
+
+	TRACE_IN(cache_queue_policy_get_first_item);
+	qpolicy = (struct cache_queue_policy_ *)policy;
+	first = (struct cache_policy_item_ *)TAILQ_FIRST(&qpolicy->head);
+	TRACE_OUT(cache_queue_policy_get_first_item);
+	return (first);
+}
+
+/* Returns the tail of the policy queue, or NULL when it is empty. */
+static struct cache_policy_item_ *
+cache_queue_policy_get_last_item(struct cache_policy_ *policy)
+{
+	struct cache_queue_policy_ *qpolicy;
+	struct cache_policy_item_ *last;
+
+	TRACE_IN(cache_queue_policy_get_last_item);
+	qpolicy = (struct cache_queue_policy_ *)policy;
+	last = (struct cache_policy_item_ *)TAILQ_LAST(&qpolicy->head,
+		cache_queue_policy_head_);
+	TRACE_OUT(cache_queue_policy_get_last_item);
+	return (last);
+}
+
+/*
+ * Returns the successor of the given item in the queue, or NULL at the end.
+ * The queue_policy local was set but never used - removed; the policy
+ * argument is kept only to satisfy the common policy interface.
+ */
+static struct cache_policy_item_ *
+cache_queue_policy_get_next_item(struct cache_policy_ *policy,
+	struct cache_policy_item_ *item)
+{
+	struct cache_queue_policy_item_ *queue_item;
+
+	TRACE_IN(cache_queue_policy_get_next_item);
+	queue_item = (struct cache_queue_policy_item_ *)item;
+
+	TRACE_OUT(cache_queue_policy_get_next_item);
+	return ((struct cache_policy_item_ *)TAILQ_NEXT(queue_item, entries));
+}
+
+/*
+ * Returns the predecessor of the given item, or NULL at the head.
+ * The queue_policy local was set but never used - removed; the policy
+ * argument is kept only to satisfy the common policy interface.
+ */
+static struct cache_policy_item_ *
+cache_queue_policy_get_prev_item(struct cache_policy_ *policy,
+	struct cache_policy_item_ *item)
+{
+	struct cache_queue_policy_item_ *queue_item;
+
+	TRACE_IN(cache_queue_policy_get_prev_item);
+	queue_item = (struct cache_queue_policy_item_ *)item;
+
+	TRACE_OUT(cache_queue_policy_get_prev_item);
+	return ((struct cache_policy_item_ *)TAILQ_PREV(queue_item,
+		cache_queue_policy_head_, entries));
+}
+
+/*
+ * Initializes cache_queue_policy_ by filling the structure with the functions
+ * pointers, defined above
+ */
+static struct cache_queue_policy_ *
+init_cache_queue_policy(void)
+{
+	struct cache_queue_policy_ *retval;
+
+	TRACE_IN(init_cache_queue_policy);
+	retval = (struct cache_queue_policy_ *)malloc(
+		sizeof(struct cache_queue_policy_));
+	assert(retval != NULL);
+	memset(retval, 0, sizeof(struct cache_queue_policy_));
+
+	retval->parent_data.create_item_func = cache_queue_policy_create_item;
+	retval->parent_data.destroy_item_func = cache_queue_policy_destroy_item;
+
+	retval->parent_data.add_item_func = cache_queue_policy_add_item;
+	retval->parent_data.remove_item_func = cache_queue_policy_remove_item;
+
+	retval->parent_data.get_first_item_func =
+		cache_queue_policy_get_first_item;
+	retval->parent_data.get_last_item_func =
+		cache_queue_policy_get_last_item;
+	retval->parent_data.get_next_item_func =
+		cache_queue_policy_get_next_item;
+	retval->parent_data.get_prev_item_func =
+		cache_queue_policy_get_prev_item;
+
+	/*
+	 * update_item_func is deliberately left unset (NULL) here - the
+	 * FIFO and LRU initializers below fill it in.
+	 */
+	TAILQ_INIT(&retval->head);
+	TRACE_OUT(init_cache_queue_policy);
+	return (retval);
+}
+
+/* Destroys every remaining item in the queue, then the policy itself. */
+static void
+destroy_cache_queue_policy(struct cache_queue_policy_ *queue_policy)
+{
+	struct cache_queue_policy_item_ *qitem;
+
+	TRACE_IN(destroy_cache_queue_policy);
+	while ((qitem = TAILQ_FIRST(&queue_policy->head)) != NULL) {
+		TAILQ_REMOVE(&queue_policy->head, qitem, entries);
+		cache_queue_policy_destroy_item(
+			(struct cache_policy_item_ *)qitem);
+	}
+	free(queue_policy);
+	TRACE_OUT(destroy_cache_queue_policy);
+}
+
+/*
+ * Makes cache_queue_policy_ behave like FIFO policy - we don't do anything,
+ * when the cache element is updated. So it always stays in its initial
+ * position in the queue - that is exactly the FIFO functionality.
+ */
+static void
+cache_fifo_policy_update_item(struct cache_policy_ *policy,
+	struct cache_policy_item_ *item)
+{
+
+	TRACE_IN(cache_fifo_policy_update_item);
+	/* policy and item arguments are ignored */
+	/* intentionally empty - FIFO order never changes on access */
+	TRACE_OUT(cache_fifo_policy_update_item);
+}
+
+/*
+ * Creates a FIFO policy: a plain queue policy whose update hook is a no-op.
+ * Declared (void) for a proper prototype, consistent with
+ * init_cache_queue_policy above.
+ */
+struct cache_policy_ *
+init_cache_fifo_policy(void)
+{
+	struct cache_queue_policy_ *retval;
+
+	TRACE_IN(init_cache_fifo_policy);
+	retval = init_cache_queue_policy();
+	retval->parent_data.update_item_func = cache_fifo_policy_update_item;
+
+	TRACE_OUT(init_cache_fifo_policy);
+	return ((struct cache_policy_ *)retval);
+}
+
+/* Releases a policy created by init_cache_fifo_policy(). */
+void
+destroy_cache_fifo_policy(struct cache_policy_ *policy)
+{
+
+	TRACE_IN(destroy_cache_fifo_policy);
+	destroy_cache_queue_policy((struct cache_queue_policy_ *)policy);
+	TRACE_OUT(destroy_cache_fifo_policy);
+}
+
+/*
+ * Makes cache_queue_policy_ behave like LRU policy. On each update, cache
+ * element is moved to the end of the queue - so it would be deleted in last
+ * turn. That is exactly the LRU policy functionality.
+ */
+/*
+ * LRU update hook: on each access the item is moved to the queue tail,
+ * so the head always holds the least recently used item.
+ */
+static void
+cache_lru_policy_update_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_queue_policy_ *queue_policy;
+ struct cache_queue_policy_item_ *queue_item;
+
+ TRACE_IN(cache_lru_policy_update_item);
+ queue_policy = (struct cache_queue_policy_ *)policy;
+ queue_item = (struct cache_queue_policy_item_ *)item;
+
+ TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
+ TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
+ TRACE_OUT(cache_lru_policy_update_item);
+}
+
+/*
+ * Creates an LRU policy: a generic queue policy whose update hook is
+ * overridden with cache_lru_policy_update_item (move-to-tail on access).
+ */
+struct cache_policy_ *
+init_cache_lru_policy()
+{
+ struct cache_queue_policy_ *retval;
+
+ TRACE_IN(init_cache_lru_policy);
+ retval = init_cache_queue_policy();
+ retval->parent_data.update_item_func = cache_lru_policy_update_item;
+
+ TRACE_OUT(init_cache_lru_policy);
+ return ((struct cache_policy_ *)retval);
+}
+
+/* Frees an LRU policy; delegates to the shared queue-policy destructor. */
+void
+destroy_cache_lru_policy(struct cache_policy_ *policy)
+{
+ struct cache_queue_policy_ *queue_policy;
+
+ TRACE_IN(destroy_cache_lru_policy);
+ queue_policy = (struct cache_queue_policy_ *)policy;
+ destroy_cache_queue_policy(queue_policy);
+ TRACE_OUT(destroy_cache_lru_policy);
+}
+
+/*
+ * LFU (least frequently used) policy implementation differs much from the
+ * LRU and FIFO (both based on cache_queue_policy_). Almost all cache_policy_
+ * functions are implemented specifically for this policy. The idea of this
+ * policy is to represent frequency (real number) as the integer number and
+ * use it as the index in the array. Each array's element is
+ * the list of elements. For example, if we have the 100-elements
+ * array for this policy, the elements with frequency 0.1 (calls per-second)
+ * would be in 10th element of the array.
+ */
+/* Allocates a zero-filled LFU policy item; aborts (assert) on OOM. */
+static struct cache_policy_item_ *
+cache_lfu_policy_create_item(void)
+{
+ struct cache_lfu_policy_item_ *retval;
+
+ TRACE_IN(cache_lfu_policy_create_item);
+ retval = (struct cache_lfu_policy_item_ *)malloc(
+ sizeof(struct cache_lfu_policy_item_));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct cache_lfu_policy_item_));
+
+ TRACE_OUT(cache_lfu_policy_create_item);
+ return ((struct cache_policy_item_ *)retval);
+}
+
+/* Frees an LFU policy item; the item must already be unlinked from its group. */
+static void
+cache_lfu_policy_destroy_item(struct cache_policy_item_ *item)
+{
+
+ TRACE_IN(cache_lfu_policy_destroy_item);
+ assert(item != NULL);
+ free(item);
+ TRACE_OUT(cache_lfu_policy_destroy_item);
+}
+
+/*
+ * When placed in the LFU policy queue for the first time, the maximum
+ * frequency is assigned to the element
+ */
+/*
+ * When placed in the LFU policy queue for the first time, the maximum
+ * frequency is assigned to the element (it starts in the highest group,
+ * so new items are evicted last).
+ */
+static void
+cache_lfu_policy_add_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+
+ TRACE_IN(cache_lfu_policy_add_item);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ lfu_item = (struct cache_lfu_policy_item_ *)item;
+
+ lfu_item->frequency = CACHELIB_MAX_FREQUENCY - 1;
+ TAILQ_INSERT_HEAD(&(lfu_policy->groups[CACHELIB_MAX_FREQUENCY - 1]),
+ lfu_item, entries);
+ TRACE_OUT(cache_lfu_policy_add_item);
+}
+
+/*
+ * On each update the frequency of the element is recalculated and, if it
+ * changed, the element would be moved to the another place in the array.
+ */
+/*
+ * On each update the frequency of the element is recalculated and, if it
+ * changed, the element would be moved to the another place in the array.
+ */
+static void
+cache_lfu_policy_update_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+ int index;
+
+ TRACE_IN(cache_lfu_policy_update_item);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ lfu_item = (struct cache_lfu_policy_item_ *)item;
+
+ /*
+ * We calculate the square of the request_count to avoid grouping of
+ * all elements at the start of the array (for example, if array size is
+ * 100 and most of its elements has frequency below the 0.01, they
+ * all would be grouped in the first array's position). Other
+ * techniques should be used here later to ensure, that elements are
+ * equally distributed in the array and not grouped in its beginning.
+ */
+ if (lfu_item->parent_data.last_request_time.tv_sec !=
+ lfu_item->parent_data.creation_time.tv_sec) {
+ /* "+ 1" keeps the divisor non-zero and positive */
+ index = ((double)lfu_item->parent_data.request_count *
+ (double)lfu_item->parent_data.request_count /
+ (lfu_item->parent_data.last_request_time.tv_sec -
+ lfu_item->parent_data.creation_time.tv_sec + 1)) *
+ CACHELIB_MAX_FREQUENCY;
+ /* clamp to the last group so the array index stays in range */
+ if (index >= CACHELIB_MAX_FREQUENCY)
+ index = CACHELIB_MAX_FREQUENCY - 1;
+ } else
+ index = CACHELIB_MAX_FREQUENCY - 1;
+
+ /* relink the item from its old frequency group into the new one */
+ TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
+ entries);
+ lfu_item->frequency = index;
+ TAILQ_INSERT_HEAD(&(lfu_policy->groups[index]), lfu_item, entries);
+
+ TRACE_OUT(cache_lfu_policy_update_item);
+}
+
+/* Unlinks the item from its current frequency group (does not free it). */
+static void
+cache_lfu_policy_remove_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+
+ TRACE_IN(cache_lfu_policy_remove_item);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ lfu_item = (struct cache_lfu_policy_item_ *)item;
+
+ TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
+ entries);
+ TRACE_OUT(cache_lfu_policy_remove_item);
+}
+
+/*
+ * Returns the head of the lowest non-empty frequency group (the least
+ * frequently used item), or NULL if all groups are empty.
+ */
+static struct cache_policy_item_ *
+cache_lfu_policy_get_first_item(struct cache_policy_ *policy)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+ int i;
+
+ TRACE_IN(cache_lfu_policy_get_first_item);
+ lfu_item = NULL;
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
+ if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
+ lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
+ break;
+ }
+
+ TRACE_OUT(cache_lfu_policy_get_first_item);
+ return ((struct cache_policy_item_ *)lfu_item);
+}
+
+/*
+ * Returns the tail of the highest non-empty frequency group (the most
+ * frequently used item), or NULL if all groups are empty.
+ */
+static struct cache_policy_item_ *
+cache_lfu_policy_get_last_item(struct cache_policy_ *policy)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+ int i;
+
+ TRACE_IN(cache_lfu_policy_get_last_item);
+ lfu_item = NULL;
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ for (i = CACHELIB_MAX_FREQUENCY - 1; i >= 0; --i)
+ if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
+ lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
+ cache_lfu_policy_group_);
+ break;
+ }
+
+ TRACE_OUT(cache_lfu_policy_get_last_item);
+ return ((struct cache_policy_item_ *)lfu_item);
+}
+
+/*
+ * Returns the successor of item in LFU order: the next item in its own
+ * group, or the first item of the next non-empty higher-frequency group;
+ * NULL if item is the last one.
+ */
+static struct cache_policy_item_ *
+cache_lfu_policy_get_next_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+ int i;
+
+ TRACE_IN(cache_lfu_policy_get_next_item);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ lfu_item = TAILQ_NEXT((struct cache_lfu_policy_item_ *)item, entries);
+ if (lfu_item == NULL)
+ {
+ for (i = ((struct cache_lfu_policy_item_ *)item)->frequency + 1;
+ i < CACHELIB_MAX_FREQUENCY; ++i) {
+ if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
+ lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
+ break;
+ }
+ }
+ }
+
+ TRACE_OUT(cache_lfu_policy_get_next_item);
+ return ((struct cache_policy_item_ *)lfu_item);
+}
+
+/*
+ * Returns the predecessor of item in LFU order: the previous item in its
+ * own group, or the last item of the nearest non-empty lower-frequency
+ * group; NULL if item is the first one.
+ */
+static struct cache_policy_item_ *
+cache_lfu_policy_get_prev_item(struct cache_policy_ *policy,
+ struct cache_policy_item_ *item)
+{
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+ int i;
+
+ TRACE_IN(cache_lfu_policy_get_prev_item);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ lfu_item = TAILQ_PREV((struct cache_lfu_policy_item_ *)item,
+ cache_lfu_policy_group_, entries);
+ if (lfu_item == NULL)
+ {
+ for (i = ((struct cache_lfu_policy_item_ *)item)->frequency - 1;
+ i >= 0; --i)
+ if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
+ lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
+ cache_lfu_policy_group_);
+ break;
+ }
+ }
+
+ TRACE_OUT(cache_lfu_policy_get_prev_item);
+ return ((struct cache_policy_item_ *)lfu_item);
+}
+
+/*
+ * Initializes the cache_policy_ structure by filling it with appropriate
+ * functions pointers
+ */
+/*
+ * Initializes the cache_policy_ structure by filling it with appropriate
+ * functions pointers and by initializing every frequency group list.
+ * Aborts (assert) on OOM.
+ */
+struct cache_policy_ *
+init_cache_lfu_policy()
+{
+ int i;
+ struct cache_lfu_policy_ *retval;
+
+ TRACE_IN(init_cache_lfu_policy);
+ retval = (struct cache_lfu_policy_ *)malloc(
+ sizeof(struct cache_lfu_policy_));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct cache_lfu_policy_));
+
+ retval->parent_data.create_item_func = cache_lfu_policy_create_item;
+ retval->parent_data.destroy_item_func = cache_lfu_policy_destroy_item;
+
+ retval->parent_data.add_item_func = cache_lfu_policy_add_item;
+ retval->parent_data.update_item_func = cache_lfu_policy_update_item;
+ retval->parent_data.remove_item_func = cache_lfu_policy_remove_item;
+
+ retval->parent_data.get_first_item_func =
+ cache_lfu_policy_get_first_item;
+ retval->parent_data.get_last_item_func =
+ cache_lfu_policy_get_last_item;
+ retval->parent_data.get_next_item_func =
+ cache_lfu_policy_get_next_item;
+ retval->parent_data.get_prev_item_func =
+ cache_lfu_policy_get_prev_item;
+
+ for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
+ TAILQ_INIT(&(retval->groups[i]));
+
+ TRACE_OUT(init_cache_lfu_policy);
+ return ((struct cache_policy_ *)retval);
+}
+
+/*
+ * Destroys an LFU policy: drains every frequency group, destroying all
+ * remaining items, then frees the policy structure itself.
+ */
+void
+destroy_cache_lfu_policy(struct cache_policy_ *policy)
+{
+ int i;
+ struct cache_lfu_policy_ *lfu_policy;
+ struct cache_lfu_policy_item_ *lfu_item;
+
+ TRACE_IN(destroy_cache_lfu_policy);
+ lfu_policy = (struct cache_lfu_policy_ *)policy;
+ for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i) {
+ while (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
+ lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
+ TAILQ_REMOVE(&(lfu_policy->groups[i]), lfu_item,
+ entries);
+ cache_lfu_policy_destroy_item(
+ (struct cache_policy_item_ *)lfu_item);
+ }
+ }
+ free(policy);
+ TRACE_OUT(destroy_cache_lfu_policy);
+}
diff --git a/usr.sbin/nscd/cacheplcs.h b/usr.sbin/nscd/cacheplcs.h
new file mode 100644
index 0000000..36997f5
--- /dev/null
+++ b/usr.sbin/nscd/cacheplcs.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_CACHEPLCS_H__
+#define __CACHED_CACHEPLCS_H__
+
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <stdlib.h>
+
+/* common policy definitions */
+#define CACHELIB_MAX_FREQUENCY 100
+
+/*
+ * cache_policy_item_ represents some abstract cache element in the policy
+ * queue. connected_item pointers to the corresponding cache_policy_item_ in
+ * another policy queue.
+ */
+struct cache_policy_item_
+{
+ char *key; /* key of the corresponding cache element */
+ size_t key_size;
+
+ size_t request_count; /* number of accesses (used by LFU) */
+ struct timeval last_request_time;
+ struct timeval creation_time;
+
+ /* peer item for the same element in another policy queue */
+ struct cache_policy_item_ *connected_item;
+};
+
+/*
+ * cache_policy_ represents an abstract policy queue. It can be customized by
+ * setting appropriate function pointers
+ */
+struct cache_policy_
+{
+ /* item lifecycle hooks */
+ struct cache_policy_item_* (*create_item_func)();
+ void (*destroy_item_func)(struct cache_policy_item_ *);
+
+ /* queue-manipulation hooks */
+ void (*add_item_func)(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+ void (*remove_item_func)(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+ void (*update_item_func)(struct cache_policy_ *,
+ struct cache_policy_item_ *);
+
+ /* traversal hooks, in eviction order (first == evicted first) */
+ struct cache_policy_item_ *(*get_first_item_func)(
+ struct cache_policy_ *);
+ struct cache_policy_item_ *(*get_last_item_func)(
+ struct cache_policy_ *);
+ struct cache_policy_item_ *(*get_next_item_func)(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+ struct cache_policy_item_ *(*get_prev_item_func)(
+ struct cache_policy_ *, struct cache_policy_item_ *);
+};
+
+/*
+ * LFU cache policy item "inherited" from cache_policy_item_ structure
+ */
+struct cache_lfu_policy_item_
+{
+ struct cache_policy_item_ parent_data;
+ /* index of the frequency group this item currently belongs to */
+ int frequency;
+
+ TAILQ_ENTRY(cache_lfu_policy_item_) entries;
+};
+
+TAILQ_HEAD(cache_lfu_policy_group_, cache_lfu_policy_item_);
+
+/*
+ * LFU policy queue "inherited" from cache_policy_.
+ */
+struct cache_lfu_policy_
+{
+ struct cache_policy_ parent_data;
+ /* one list per quantized frequency value; index == frequency */
+ struct cache_lfu_policy_group_ groups[CACHELIB_MAX_FREQUENCY];
+};
+
+/*
+ * LRU and FIFO policies item "inherited" from cache_policy_item_
+ */
+struct cache_queue_policy_item_
+{
+ struct cache_policy_item_ parent_data;
+ TAILQ_ENTRY(cache_queue_policy_item_) entries; /* queue linkage */
+};
+
+/*
+ * LRU and FIFO policies "inherited" from cache_policy_
+ */
+struct cache_queue_policy_
+{
+ struct cache_policy_ parent_data;
+ /* single queue; head is evicted first */
+ TAILQ_HEAD(cache_queue_policy_head_, cache_queue_policy_item_) head;
+};
+
+typedef struct cache_queue_policy_ cache_fifo_policy_;
+typedef struct cache_queue_policy_ cache_lru_policy_;
+
+/* fifo policy routines */
+extern struct cache_policy_ *init_cache_fifo_policy();
+extern void destroy_cache_fifo_policy(struct cache_policy_ *);
+
+/* lru policy routines */
+extern struct cache_policy_ *init_cache_lru_policy();
+extern void destroy_cache_lru_policy(struct cache_policy_ *);
+
+/* lfu policy routines */
+extern struct cache_policy_ *init_cache_lfu_policy();
+extern void destroy_cache_lfu_policy(struct cache_policy_ *);
+
+#endif
diff --git a/usr.sbin/nscd/config.c b/usr.sbin/nscd/config.c
new file mode 100644
index 0000000..bc3eb49
--- /dev/null
+++ b/usr.sbin/nscd/config.c
@@ -0,0 +1,588 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "config.h"
+#include "debug.h"
+#include "log.h"
+
+/*
+ * Default entries, which always exist in the configuration
+ */
+const char *c_default_entries[6] = {
+ NSDB_PASSWD,
+ NSDB_GROUP,
+ NSDB_HOSTS,
+ NSDB_SERVICES,
+ NSDB_PROTOCOLS,
+ NSDB_RPC
+ };
+
+static int configuration_entry_cmp(const void *, const void *);
+static int configuration_entry_sort_cmp(const void *, const void *);
+static int configuration_entry_cache_mp_sort_cmp(const void *, const void *);
+static int configuration_entry_cache_mp_cmp(const void *, const void *);
+static int configuration_entry_cache_mp_part_cmp(const void *, const void *);
+static struct configuration_entry *create_configuration_entry(const char *,
+ struct timeval const *, struct timeval const *,
+ struct common_cache_entry_params const *,
+ struct common_cache_entry_params const *,
+ struct mp_cache_entry_params const *);
+
+/* qsort comparator: both arguments are struct configuration_entry **. */
+static int
+configuration_entry_sort_cmp(const void *e1, const void *e2)
+{
+ return (strcmp((*((struct configuration_entry **)e1))->name,
+ (*((struct configuration_entry **)e2))->name
+ ));
+}
+
+/* bsearch comparator: e1 is the key (a plain string), e2 an array slot. */
+static int
+configuration_entry_cmp(const void *e1, const void *e2)
+{
+ return (strcmp((const char *)e1,
+ (*((struct configuration_entry **)e2))->name
+ ));
+}
+
+/* qsort comparator for the mp_cache_entries array (cache_entry slots). */
+static int
+configuration_entry_cache_mp_sort_cmp(const void *e1, const void *e2)
+{
+ return (strcmp((*((cache_entry *)e1))->params->entry_name,
+ (*((cache_entry *)e2))->params->entry_name
+ ));
+}
+
+/* bsearch comparator: e1 is the entry-name key, e2 a cache_entry slot. */
+static int
+configuration_entry_cache_mp_cmp(const void *e1, const void *e2)
+{
+ return (strcmp((const char *)e1,
+ (*((cache_entry *)e2))->params->entry_name
+ ));
+}
+
+/*
+ * Prefix-match variant of the comparator above: matches every entry whose
+ * name starts with the key e1 (used for range lookups when flushing).
+ */
+static int
+configuration_entry_cache_mp_part_cmp(const void *e1, const void *e2)
+{
+ return (strncmp((const char *)e1,
+ (*((cache_entry *)e2))->params->entry_name,
+ strlen((const char *)e1)
+ ));
+}
+
+/*
+ * Allocates and fills a configuration entry. The three cache mutexes are
+ * initialized first; on any mutex failure the already-created state is
+ * rolled back and NULL is returned. The three per-cache entry names are
+ * derived from "name" with the "+", "-" and "*" suffixes (positive,
+ * negative and multipart caches respectively). Allocation failures are
+ * fatal (assert).
+ */
+static struct configuration_entry *
+create_configuration_entry(const char *name,
+ struct timeval const *common_timeout,
+ struct timeval const *mp_timeout,
+ struct common_cache_entry_params const *positive_params,
+ struct common_cache_entry_params const *negative_params,
+ struct mp_cache_entry_params const *mp_params)
+{
+ struct configuration_entry *retval;
+ size_t size;
+ int res;
+
+ TRACE_IN(create_configuration_entry);
+ assert(name != NULL);
+ assert(positive_params != NULL);
+ assert(negative_params != NULL);
+ assert(mp_params != NULL);
+
+ retval = (struct configuration_entry *)malloc(
+ sizeof(struct configuration_entry));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct configuration_entry));
+
+ res = pthread_mutex_init(&retval->positive_cache_lock, NULL);
+ if (res != 0) {
+ free(retval);
+ LOG_ERR_2("create_configuration_entry",
+ "can't create positive cache lock");
+ TRACE_OUT(create_configuration_entry);
+ return (NULL);
+ }
+
+ res = pthread_mutex_init(&retval->negative_cache_lock, NULL);
+ if (res != 0) {
+ pthread_mutex_destroy(&retval->positive_cache_lock);
+ free(retval);
+ LOG_ERR_2("create_configuration_entry",
+ "can't create negative cache lock");
+ TRACE_OUT(create_configuration_entry);
+ return (NULL);
+ }
+
+ res = pthread_mutex_init(&retval->mp_cache_lock, NULL);
+ if (res != 0) {
+ pthread_mutex_destroy(&retval->positive_cache_lock);
+ pthread_mutex_destroy(&retval->negative_cache_lock);
+ free(retval);
+ /* was a copy-paste of the negative-lock message */
+ LOG_ERR_2("create_configuration_entry",
+ "can't create mp cache lock");
+ TRACE_OUT(create_configuration_entry);
+ return (NULL);
+ }
+
+ memcpy(&retval->positive_cache_params, positive_params,
+ sizeof(struct common_cache_entry_params));
+ memcpy(&retval->negative_cache_params, negative_params,
+ sizeof(struct common_cache_entry_params));
+ memcpy(&retval->mp_cache_params, mp_params,
+ sizeof(struct mp_cache_entry_params));
+
+ size = strlen(name);
+ retval->name = (char *)malloc(size + 1);
+ assert(retval->name != NULL);
+ memset(retval->name, 0, size + 1);
+ memcpy(retval->name, name, size);
+
+ memcpy(&retval->common_query_timeout, common_timeout,
+ sizeof(struct timeval));
+ memcpy(&retval->mp_query_timeout, mp_timeout,
+ sizeof(struct timeval));
+
+ asprintf(&retval->positive_cache_params.entry_name, "%s+", name);
+ assert(retval->positive_cache_params.entry_name != NULL);
+
+ asprintf(&retval->negative_cache_params.entry_name, "%s-", name);
+ assert(retval->negative_cache_params.entry_name != NULL);
+
+ asprintf(&retval->mp_cache_params.entry_name, "%s*", name);
+ assert(retval->mp_cache_params.entry_name != NULL);
+
+ TRACE_OUT(create_configuration_entry);
+ return (retval);
+}
+
+/*
+ * Creates configuration entry and fills it with default values
+ */
+/*
+ * Creates configuration entry and fills it with default values:
+ * LRU positive cache, FIFO negative cache, and default sizes/lifetimes
+ * for all three caches taken from the DEFAULT_* constants.
+ */
+struct configuration_entry *
+create_def_configuration_entry(const char *name)
+{
+ struct common_cache_entry_params positive_params, negative_params;
+ struct mp_cache_entry_params mp_params;
+ struct timeval default_common_timeout, default_mp_timeout;
+
+ struct configuration_entry *res = NULL;
+
+ TRACE_IN(create_def_configuration_entry);
+ memset(&positive_params, 0,
+ sizeof(struct common_cache_entry_params));
+ positive_params.entry_type = CET_COMMON;
+ positive_params.cache_entries_size = DEFAULT_CACHE_HT_SIZE;
+ positive_params.max_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE;
+ positive_params.satisf_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE / 2;
+ positive_params.max_lifetime.tv_sec = DEFAULT_POSITIVE_LIFETIME;
+ positive_params.policy = CPT_LRU;
+
+ /* negative cache starts from the positive defaults, then overrides */
+ memcpy(&negative_params, &positive_params,
+ sizeof(struct common_cache_entry_params));
+ negative_params.max_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE;
+ negative_params.satisf_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE / 2;
+ negative_params.max_lifetime.tv_sec = DEFAULT_NEGATIVE_LIFETIME;
+ negative_params.policy = CPT_FIFO;
+
+ memset(&default_common_timeout, 0, sizeof(struct timeval));
+ default_common_timeout.tv_sec = DEFAULT_COMMON_ENTRY_TIMEOUT;
+
+ memset(&default_mp_timeout, 0, sizeof(struct timeval));
+ default_mp_timeout.tv_sec = DEFAULT_MP_ENTRY_TIMEOUT;
+
+ memset(&mp_params, 0,
+ sizeof(struct mp_cache_entry_params));
+ mp_params.entry_type = CET_MULTIPART;
+ mp_params.max_elemsize = DEFAULT_MULTIPART_ELEMENTS_SIZE;
+ mp_params.max_sessions = DEFAULT_MULITPART_SESSIONS_SIZE;
+ mp_params.max_lifetime.tv_sec = DEFAULT_MULITPART_LIFETIME;
+
+ res = create_configuration_entry(name, &default_common_timeout,
+ &default_mp_timeout, &positive_params, &negative_params,
+ &mp_params);
+
+ TRACE_OUT(create_def_configuration_entry);
+ return (res);
+}
+
+/*
+ * Releases everything a configuration entry owns: the three cache
+ * mutexes, the entry/cache names, the mp cache-entries array and the
+ * entry itself.
+ */
+void
+destroy_configuration_entry(struct configuration_entry *entry)
+{
+ TRACE_IN(destroy_configuration_entry);
+ assert(entry != NULL);
+ pthread_mutex_destroy(&entry->positive_cache_lock);
+ pthread_mutex_destroy(&entry->negative_cache_lock);
+ pthread_mutex_destroy(&entry->mp_cache_lock);
+ free(entry->name);
+ free(entry->positive_cache_params.entry_name);
+ free(entry->negative_cache_params.entry_name);
+ free(entry->mp_cache_params.entry_name);
+ free(entry->mp_cache_entries);
+ free(entry);
+ TRACE_OUT(destroy_configuration_entry);
+}
+
+/*
+ * Inserts an entry into the configuration, keeping the entries array
+ * sorted by name (required by the bsearch in configuration_find_entry).
+ * Returns 0 on success, -1 if an entry with the same name already exists.
+ * The array capacity is doubled when full.
+ */
+int
+add_configuration_entry(struct configuration *config,
+ struct configuration_entry *entry)
+{
+ TRACE_IN(add_configuration_entry);
+ assert(entry != NULL);
+ assert(entry->name != NULL);
+ if (configuration_find_entry(config, entry->name) != NULL) {
+ TRACE_OUT(add_configuration_entry);
+ return (-1);
+ }
+
+ if (config->entries_size == config->entries_capacity) {
+ struct configuration_entry **new_entries;
+
+ config->entries_capacity *= 2;
+ new_entries = (struct configuration_entry **)malloc(
+ sizeof(struct configuration_entry *) *
+ config->entries_capacity);
+ assert(new_entries != NULL);
+ memset(new_entries, 0, sizeof(struct configuration_entry *) *
+ config->entries_capacity);
+ memcpy(new_entries, config->entries,
+ sizeof(struct configuration_entry *) *
+ config->entries_size);
+
+ free(config->entries);
+ config->entries = new_entries;
+ }
+
+ config->entries[config->entries_size++] = entry;
+ qsort(config->entries, config->entries_size,
+ sizeof(struct configuration_entry *),
+ configuration_entry_sort_cmp);
+
+ TRACE_OUT(add_configuration_entry);
+ return (0);
+}
+
+/* Returns the number of entries currently stored in the configuration. */
+size_t
+configuration_get_entries_size(struct configuration *config)
+{
+ TRACE_IN(configuration_get_entries_size);
+ assert(config != NULL);
+ TRACE_OUT(configuration_get_entries_size);
+ return (config->entries_size);
+}
+
+/* Returns the entry at the given position; index must be in range. */
+struct configuration_entry *
+configuration_get_entry(struct configuration *config, size_t index)
+{
+ TRACE_IN(configuration_get_entry);
+ assert(config != NULL);
+ assert(index < config->entries_size);
+ TRACE_OUT(configuration_get_entry);
+ return (config->entries[index]);
+}
+
+/*
+ * Looks an entry up by name via binary search (the entries array is kept
+ * sorted by add_configuration_entry). Returns NULL when not found.
+ */
+struct configuration_entry *
+configuration_find_entry(struct configuration *config,
+ const char *name)
+{
+ struct configuration_entry **retval;
+
+ TRACE_IN(configuration_find_entry);
+
+ retval = bsearch(name, config->entries, config->entries_size,
+ sizeof(struct configuration_entry *), configuration_entry_cmp);
+ TRACE_OUT(configuration_find_entry);
+
+ return ((retval != NULL) ? *retval : NULL);
+}
+
+/*
+ * All multipart cache entries are stored in the configuration_entry in the
+ * sorted array (sorted by names). The 3 functions below manage this array.
+ */
+
+/*
+ * Appends a multipart cache entry to the config entry's array and re-sorts
+ * it by name. The array is reallocated (grown by one) on every call; the
+ * new element is prepended before copying the old contents, then qsort
+ * restores the order. Always returns 0.
+ */
+int
+configuration_entry_add_mp_cache_entry(struct configuration_entry *config_entry,
+ cache_entry c_entry)
+{
+ cache_entry *new_mp_entries, *old_mp_entries;
+
+ TRACE_IN(configuration_entry_add_mp_cache_entry);
+ ++config_entry->mp_cache_entries_size;
+ new_mp_entries = (cache_entry *)malloc(sizeof(cache_entry) *
+ config_entry->mp_cache_entries_size);
+ assert(new_mp_entries != NULL);
+ new_mp_entries[0] = c_entry;
+
+ if (config_entry->mp_cache_entries_size - 1 > 0) {
+ memcpy(new_mp_entries + 1,
+ config_entry->mp_cache_entries,
+ (config_entry->mp_cache_entries_size - 1) *
+ sizeof(cache_entry));
+ }
+
+ old_mp_entries = config_entry->mp_cache_entries;
+ config_entry->mp_cache_entries = new_mp_entries;
+ free(old_mp_entries);
+
+ qsort(config_entry->mp_cache_entries,
+ config_entry->mp_cache_entries_size,
+ sizeof(cache_entry),
+ configuration_entry_cache_mp_sort_cmp);
+
+ TRACE_OUT(configuration_entry_add_mp_cache_entry);
+ return (0);
+}
+
+/*
+ * Finds a multipart cache entry by exact name via binary search.
+ * Returns NULL when not found.
+ */
+cache_entry
+configuration_entry_find_mp_cache_entry(
+ struct configuration_entry *config_entry, const char *mp_name)
+{
+ cache_entry *result;
+
+ TRACE_IN(configuration_entry_find_mp_cache_entry);
+ result = bsearch(mp_name, config_entry->mp_cache_entries,
+ config_entry->mp_cache_entries_size,
+ sizeof(cache_entry), configuration_entry_cache_mp_cmp);
+
+ if (result == NULL) {
+ TRACE_OUT(configuration_entry_find_mp_cache_entry);
+ return (NULL);
+ } else {
+ TRACE_OUT(configuration_entry_find_mp_cache_entry);
+ return (*result);
+ }
+}
+
+/*
+ * Searches for all multipart entries with names starting with mp_name.
+ * Needed for cache flushing.
+ */
+/*
+ * Searches for all multipart entries with names starting with mp_name.
+ * Needed for cache flushing. bsearch finds one match inside the run of
+ * prefix-matching entries; the two while loops then widen [*start,
+ * *finish) to cover the whole run (finish is exclusive). Returns 0 on
+ * success, -1 when nothing matches.
+ */
+int
+configuration_entry_find_mp_cache_entries(
+ struct configuration_entry *config_entry, const char *mp_name,
+ cache_entry **start, cache_entry **finish)
+{
+ cache_entry *result;
+
+ TRACE_IN(configuration_entry_find_mp_cache_entries);
+ result = bsearch(mp_name, config_entry->mp_cache_entries,
+ config_entry->mp_cache_entries_size,
+ sizeof(cache_entry), configuration_entry_cache_mp_part_cmp);
+
+ if (result == NULL) {
+ TRACE_OUT(configuration_entry_find_mp_cache_entries);
+ return (-1);
+ }
+
+ *start = result;
+ *finish = result + 1;
+
+ /* extend the range leftwards while the prefix still matches */
+ while (*start != config_entry->mp_cache_entries) {
+ if (configuration_entry_cache_mp_part_cmp(mp_name, *start - 1) == 0)
+ *start = *start - 1;
+ else
+ break;
+ }
+
+ /* and rightwards, up to the end of the array */
+ while (*finish != config_entry->mp_cache_entries +
+ config_entry->mp_cache_entries_size) {
+
+ if (configuration_entry_cache_mp_part_cmp(
+ mp_name, *finish) == 0)
+ *finish = *finish + 1;
+ else
+ break;
+ }
+
+ TRACE_OUT(configuration_entry_find_mp_cache_entries);
+ return (0);
+}
+
+/*
+ * Configuration entry uses rwlock to handle access to its fields.
+ */
+/* Acquires the configuration rwlock for reading. */
+void
+configuration_lock_rdlock(struct configuration *config)
+{
+ TRACE_IN(configuration_lock_rdlock);
+ pthread_rwlock_rdlock(&config->rwlock);
+ TRACE_OUT(configuration_lock_rdlock);
+}
+
+/* Acquires the configuration rwlock for writing. */
+void
+configuration_lock_wrlock(struct configuration *config)
+{
+ TRACE_IN(configuration_lock_wrlock);
+ pthread_rwlock_wrlock(&config->rwlock);
+ TRACE_OUT(configuration_lock_wrlock);
+}
+
+/* Releases the configuration rwlock (read or write). */
+void
+configuration_unlock(struct configuration *config)
+{
+ TRACE_IN(configuration_unlock);
+ pthread_rwlock_unlock(&config->rwlock);
+ TRACE_OUT(configuration_unlock);
+}
+
+/*
+ * Configuration entry uses 3 mutexes to handle cache operations. They are
+ * acquired by configuration_lock_entry and configuration_unlock_entry
+ * functions.
+ */
+/*
+ * Locks one of the entry's three cache mutexes, selected by lock_type
+ * (positive, negative or multipart).
+ */
+void
+configuration_lock_entry(struct configuration_entry *entry,
+ enum config_entry_lock_type lock_type)
+{
+ TRACE_IN(configuration_lock_entry);
+ assert(entry != NULL);
+
+ switch (lock_type) {
+ case CELT_POSITIVE:
+ pthread_mutex_lock(&entry->positive_cache_lock);
+ break;
+ case CELT_NEGATIVE:
+ pthread_mutex_lock(&entry->negative_cache_lock);
+ break;
+ case CELT_MULTIPART:
+ pthread_mutex_lock(&entry->mp_cache_lock);
+ break;
+ default:
+ /* should be unreachable */
+ break;
+ }
+ TRACE_OUT(configuration_lock_entry);
+}
+
+/*
+ * Unlocks the entry's cache mutex selected by lock_type; counterpart of
+ * configuration_lock_entry.
+ */
+void
+configuration_unlock_entry(struct configuration_entry *entry,
+ enum config_entry_lock_type lock_type)
+{
+ TRACE_IN(configuration_unlock_entry);
+ assert(entry != NULL);
+
+ switch (lock_type) {
+ case CELT_POSITIVE:
+ pthread_mutex_unlock(&entry->positive_cache_lock);
+ break;
+ case CELT_NEGATIVE:
+ pthread_mutex_unlock(&entry->negative_cache_lock);
+ break;
+ case CELT_MULTIPART:
+ pthread_mutex_unlock(&entry->mp_cache_lock);
+ break;
+ default:
+ /* should be unreachable */
+ break;
+ }
+ TRACE_OUT(configuration_unlock_entry);
+}
+
+/*
+ * Allocates an empty configuration with INITIAL_ENTRIES_CAPACITY slots
+ * and an initialized rwlock. Allocation failures are fatal (assert).
+ */
+struct configuration *
+init_configuration(void)
+{
+ struct configuration *retval;
+
+ TRACE_IN(init_configuration);
+ retval = (struct configuration *)malloc(sizeof(struct configuration));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct configuration));
+
+ retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
+ retval->entries = (struct configuration_entry **)malloc(
+ sizeof(struct configuration_entry *) *
+ retval->entries_capacity);
+ assert(retval->entries != NULL);
+ memset(retval->entries, 0, sizeof(struct configuration_entry *) *
+ retval->entries_capacity);
+
+ pthread_rwlock_init(&retval->rwlock, NULL);
+
+ TRACE_OUT(init_configuration);
+ return (retval);
+}
+
+/*
+ * Resets a configuration to its built-in defaults: socket/pidfile paths,
+ * socket mode, query timeout and thread count. Any existing entries are
+ * destroyed and the entries array is emptied (capacity is kept).
+ */
+void
+fill_configuration_defaults(struct configuration *config)
+{
+ size_t len, i;
+
+ TRACE_IN(fill_configuration_defaults);
+ assert(config != NULL);
+
+ if (config->socket_path != NULL)
+ free(config->socket_path);
+
+ len = strlen(DEFAULT_SOCKET_PATH);
+ config->socket_path = (char *)malloc(len + 1);
+ assert(config->socket_path != NULL);
+ memset(config->socket_path, 0, len + 1);
+ memcpy(config->socket_path, DEFAULT_SOCKET_PATH, len);
+
+ len = strlen(DEFAULT_PIDFILE_PATH);
+ config->pidfile_path = (char *)malloc(len + 1);
+ assert(config->pidfile_path != NULL);
+ memset(config->pidfile_path, 0, len + 1);
+ memcpy(config->pidfile_path, DEFAULT_PIDFILE_PATH, len);
+
+ /* socket is world read/writable by default */
+ config->socket_mode = S_IFSOCK | S_IRUSR | S_IWUSR |
+ S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+ config->force_unlink = 1;
+
+ config->query_timeout = DEFAULT_QUERY_TIMEOUT;
+ config->threads_num = DEFAULT_THREADS_NUM;
+
+ for (i = 0; i < config->entries_size; ++i)
+ destroy_configuration_entry(config->entries[i]);
+ config->entries_size = 0;
+
+ TRACE_OUT(fill_configuration_defaults);
+}
+
+/*
+ * Destroys a configuration: frees the paths, destroys every entry and
+ * the entries array, releases the rwlock and frees the structure.
+ */
+void
+destroy_configuration(struct configuration *config)
+{
+ /*
+ * size_t, not int: entries_size is a size_t (see
+ * fill_configuration_defaults), so this avoids a signed/unsigned
+ * comparison in the loop condition.
+ */
+ size_t i;
+ TRACE_IN(destroy_configuration);
+ assert(config != NULL);
+ free(config->pidfile_path);
+ free(config->socket_path);
+
+ for (i = 0; i < config->entries_size; ++i)
+ destroy_configuration_entry(config->entries[i]);
+ free(config->entries);
+
+ pthread_rwlock_destroy(&config->rwlock);
+ free(config);
+ TRACE_OUT(destroy_configuration);
+}
diff --git a/usr.sbin/nscd/config.h b/usr.sbin/nscd/config.h
new file mode 100644
index 0000000..b54dc9b
--- /dev/null
+++ b/usr.sbin/nscd/config.h
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_CONFIG_H__
+#define __CACHED_CONFIG_H__
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <pthread.h>
+#include <nsswitch.h>
+#include <unistd.h>
+#include "cachelib.h"
+
+#define DEFAULT_QUERY_TIMEOUT 8
+#define DEFAULT_THREADS_NUM 8
+
+#define DEFAULT_COMMON_ENTRY_TIMEOUT 10
+#define DEFAULT_MP_ENTRY_TIMEOUT 60
+#define DEFAULT_CACHE_HT_SIZE 257
+
+#define INITIAL_ENTRIES_CAPACITY 8
+#define DEFAULT_SOCKET_PATH "/var/run/cached"
+#define DEFAULT_PIDFILE_PATH "/var/run/cached.pid"
+
+#define DEFAULT_POSITIVE_ELEMENTS_SIZE (2048)
+#define DEFAULT_POSITIVE_LIFETIME (3600)
+
+#define DEFAULT_NEGATIVE_ELEMENTS_SIZE (2048)
+#define DEFAULT_NEGATIVE_LIFETIME (60)
+
+#define DEFAULT_MULTIPART_ELEMENTS_SIZE (1024 * 8)
+#define DEFAULT_MULITPART_SESSIONS_SIZE (1024)
+#define DEFAULT_MULITPART_LIFETIME (3600)
+
+extern const char *c_default_entries[6];
+
+/*
+ * Configuration entry represents the details of each cache entry in the
+ * config file (i.e. passwd or group). Its purpose also is to acquire locks
+ * of three different types (for usual read/write caching, for multipart
+ * caching and for caching of the negative results) for that cache entry.
+ */
+struct configuration_entry {
+	/* Tuning parameters for each of the three cache flavors. */
+	struct common_cache_entry_params positive_cache_params;
+	struct common_cache_entry_params negative_cache_params;
+	struct mp_cache_entry_params mp_cache_params;
+
+	/*
+	 * configuration_entry holds pointers for all actual cache_entries,
+	 * which are used for it. There is one for positive caching, one
+	 * for negative caching, and several (one per each euid/egid) for
+	 * multipart caching.
+	 */
+	cache_entry positive_cache_entry;
+	cache_entry negative_cache_entry;
+
+	/* Dynamically sized array of multipart cache entries. */
+	cache_entry *mp_cache_entries;
+	size_t mp_cache_entries_size;
+
+	/* Timeouts for plain and multipart queries against this entry. */
+	struct timeval common_query_timeout;
+	struct timeval mp_query_timeout;
+
+	char *name;	/* database name, e.g. "passwd" or "group" */
+	/* One mutex per cache flavor; see configuration_lock_entry(). */
+	pthread_mutex_t positive_cache_lock;
+	pthread_mutex_t negative_cache_lock;
+	pthread_mutex_t mp_cache_lock;
+
+	/* NOTE(review): presumably, when set, the daemon performs the
+	 * nsswitch lookup itself instead of only serving cached data —
+	 * confirm against the query-processing code. */
+	int perform_actual_lookups;
+	int enabled;	/* NOTE(review): looks like a per-database on/off
+			 * switch — confirm against the parser/daemon. */
+};
+
+/*
+ * Contains global configuration options and array of all configuration entries
+ */
+struct configuration {
+	char *pidfile_path;	/* defaults to DEFAULT_PIDFILE_PATH */
+	char *socket_path;	/* defaults to DEFAULT_SOCKET_PATH */
+
+	/* Growable array of per-database entries. */
+	struct configuration_entry **entries;
+	size_t entries_capacity;	/* allocated slots */
+	size_t entries_size;		/* used slots */
+
+	/* Guards the structure; see configuration_lock_rdlock() etc. */
+	pthread_rwlock_t rwlock;
+
+	mode_t socket_mode;	/* permission bits for the listen socket */
+	int force_unlink;	/* NOTE(review): presumably unlink a stale
+				 * socket file before binding — confirm */
+	int query_timeout;	/* seconds (DEFAULT_QUERY_TIMEOUT) */
+
+	int threads_num;	/* worker thread count */
+};
+
+/*
+ * Selects which of a configuration_entry's three mutexes the
+ * configuration_lock_entry()/configuration_unlock_entry() helpers use.
+ */
+enum config_entry_lock_type {
+	CELT_POSITIVE,
+	CELT_NEGATIVE,
+	CELT_MULTIPART
+};
+
+extern struct configuration *init_configuration(void);
+extern void destroy_configuration(struct configuration *);
+extern void fill_configuration_defaults(struct configuration *);
+
+extern int add_configuration_entry(struct configuration *,
+ struct configuration_entry *);
+extern struct configuration_entry *create_def_configuration_entry(
+ const char *);
+extern void destroy_configuration_entry(struct configuration_entry *);
+extern size_t configuration_get_entries_size(struct configuration *);
+extern struct configuration_entry *configuration_get_entry(
+ struct configuration *, size_t);
+extern struct configuration_entry *configuration_find_entry(
+ struct configuration *, const char *);
+
+extern int configuration_entry_add_mp_cache_entry(struct configuration_entry *,
+ cache_entry);
+extern cache_entry configuration_entry_find_mp_cache_entry(
+ struct configuration_entry *,
+ const char *);
+extern int configuration_entry_find_mp_cache_entries(
+ struct configuration_entry *, const char *, cache_entry **,
+ cache_entry **);
+
+extern void configuration_lock_rdlock(struct configuration *config);
+extern void configuration_lock_wrlock(struct configuration *config);
+extern void configuration_unlock(struct configuration *config);
+
+extern void configuration_lock_entry(struct configuration_entry *,
+ enum config_entry_lock_type);
+extern void configuration_unlock_entry(struct configuration_entry *,
+ enum config_entry_lock_type);
+
+#endif
diff --git a/usr.sbin/nscd/debug.c b/usr.sbin/nscd/debug.c
new file mode 100644
index 0000000..420c517
--- /dev/null
+++ b/usr.sbin/nscd/debug.c
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdio.h>
+#include "debug.h"
+
+static int trace_level = 0;
+static int trace_level_bk = 0;
+
+/*
+ * Prints an indented "=> name" marker and enters one nesting level.
+ * The depth always increases, even when nothing is printed, so that
+ * __trace_out() stays balanced.
+ */
+void
+__trace_in(const char *s, const char *f, int l)
+{
+	int i;
+
+	/*
+	 * f and l are accepted only for interface symmetry with the other
+	 * __trace_* functions; the entry line prints just the name.
+	 */
+	(void)f;
+	(void)l;
+
+	if (trace_level < TRACE_WANTED)
+	{
+		for (i = 0; i < trace_level; ++i)
+			printf("\t");
+
+		printf("=> %s\n", s);
+	}
+
+	++trace_level;
+}
+
+/* Emits an indented "= file: line" marker at the current depth. */
+void
+__trace_point(const char *f, int l)
+{
+	int depth;
+
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 1; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("= %s: %d\n", f, l);
+}
+
+/* Emits an arbitrary message, indented to the current trace depth. */
+void
+__trace_msg(const char *msg, const char *f, int l)
+{
+	int depth;
+
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 1; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("= MSG %s, %s: %d\n", msg, f, l);
+}
+
+/* Emits a described pointer value, indented to the current depth. */
+void
+__trace_ptr(const char *desc, const void *p, const char *f, int l)
+{
+	int depth;
+
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 1; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("= PTR %s: %p, %s: %d\n", desc, p, f, l);
+}
+
+/* Emits a described integer value, indented to the current depth. */
+void
+__trace_int(const char *desc, int i, const char *f, int l)
+{
+	int depth;
+
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 1; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("= INT %s: %i, %s: %d\n",desc, i, f, l);
+}
+
+/* Emits a described (quoted) string value at the current depth. */
+void
+__trace_str(const char *desc, const char *s, const char *f, int l)
+{
+	int depth;
+
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 1; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("= STR %s: '%s', %s: %d\n", desc, s, f, l);
+}
+
+/*
+ * Leaves one nesting level and then, if still within the wanted depth,
+ * prints the matching "<= name" marker for __trace_in().
+ */
+void
+__trace_out(const char *s, const char *f, int l)
+{
+	int depth;
+
+	--trace_level;
+	if (trace_level >= TRACE_WANTED)
+		return;
+
+	for (depth = 0; depth < trace_level; ++depth)
+		putchar('\t');
+	printf("<= %s\n", s);
+}
+
+/* Re-enables tracing by restoring the level saved by __trace_off(). */
+void
+__trace_on(void)
+{
+
+	trace_level = trace_level_bk;
+	trace_level_bk = 0;
+}
+
+/*
+ * Suspends tracing by saving the current level and pushing it well
+ * above any sane TRACE_WANTED value.
+ */
+void
+__trace_off(void)
+{
+
+	trace_level_bk = trace_level;
+	trace_level = 1024;
+}
diff --git a/usr.sbin/nscd/debug.h b/usr.sbin/nscd/debug.h
new file mode 100644
index 0000000..320e10f
--- /dev/null
+++ b/usr.sbin/nscd/debug.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_DEBUG_H__
+#define __CACHED_DEBUG_H__
+
+/* Maximum nesting depth that the tracer will actually print. */
+#define TRACE_WANTED 32
+
+/*
+ * Tracing is currently compiled out ("#if 0").  Replace the condition
+ * with the commented-out NDEBUG test to route the TRACE_* macros to the
+ * __trace_* helpers in debug.c.
+ */
+/* #ifndef NDEBUG */
+#if 0
+#define TRACE_IN(x) __trace_in(#x, __FILE__, __LINE__)
+#define TRACE_POINT() __trace_point(__FILE__, __LINE__)
+#define TRACE_MSG(x) __trace_msg(x, __FILE__, __LINE__)
+#define TRACE_PTR(p) __trace_ptr(#p, p, __FILE__, __LINE__)
+#define TRACE_INT(i) __trace_int(#i, i, __FILE__, __LINE__)
+#define TRACE_STR(s) __trace_str(#s, s, __FILE__, __LINE__)
+#define TRACE_OUT(x) __trace_out(#x, __FILE__, __LINE__)
+#define TRACE_ON() __trace_on()
+#define TRACE_OFF() __trace_off()
+#else
+/* No-op versions for production builds. */
+#define TRACE_IN(x)
+#define TRACE_POINT()
+#define TRACE_MSG(x)
+#define TRACE_PTR(p)
+#define TRACE_INT(i)
+#define TRACE_STR(s)
+#define TRACE_OUT(x)
+#define TRACE_ON()
+#define TRACE_OFF()
+#endif
+
+extern void __trace_in(const char *, const char *, int);
+extern void __trace_point(const char *, int);
+extern void __trace_msg(const char *, const char *, int);
+extern void __trace_ptr(const char *, const void *, const char *, int);
+extern void __trace_int(const char *, int, const char *, int);
+extern void __trace_str(const char *, const char *, const char *, int);
+extern void __trace_out(const char *, const char *, int);
+/* (void) makes these full prototypes instead of K&R-style "()" decls. */
+extern void __trace_on(void);
+extern void __trace_off(void);
+
+#endif
diff --git a/usr.sbin/nscd/hashtable.h b/usr.sbin/nscd/hashtable.h
new file mode 100644
index 0000000..86dad9f
--- /dev/null
+++ b/usr.sbin/nscd/hashtable.h
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHELIB_HASHTABLE_H__
+#define __CACHELIB_HASHTABLE_H__
+
+#include <search.h>
+#include <string.h>
+
+#define HASHTABLE_INITIAL_ENTRIES_CAPACITY 8
+typedef int hashtable_index_t;
+
+/*
+ * This file contains queue.h-like macro definitions for hash tables.
+ * A hash table is organized as an array, of the specified size, of
+ * user-defined (with HASHTABLE_ENTRY_HEAD) structures. Each hash table
+ * entry (user-defined structure) stores its elements in a sorted array.
+ * You can place elements into the hash table, retrieve elements with a
+ * specified key, traverse through all elements, and delete them.
+ * New elements are placed into the hash table by using the compare and
+ * hashing functions provided by the user.
+ */
+
+/*
+ * Defines the hash table entry (bucket) structure, that uses the
+ * specified type of elements: "values" is a dynamic sorted array of
+ * "size" elements with room for "capacity".
+ */
+#define HASHTABLE_ENTRY_HEAD(name, type) struct name {			\
+	type *values;							\
+	size_t capacity;						\
+	size_t size;							\
+}
+
+/*
+ * Defines the hash table structure, which uses the specified type of
+ * entries. The only restriction for entries is that they should have
+ * the field defined with the HASHTABLE_ENTRY_HEAD macro.
+ */
+#define HASHTABLE_HEAD(name, entry) struct name {			\
+	struct entry *entries;						\
+	size_t entries_size;						\
+}
+
+/* Number of hash buckets in the table (not the stored-element count). */
+#define HASHTABLE_ENTRIES_COUNT(table) ((table)->entries_size)
+
+/*
+ * Unlike most of queue.h data types, hash tables can not be initialized
+ * statically - so there is no HASHTABLE_HEAD_INITIALIZED macro.
+ * Allocates the bucket array and an initial value array in each bucket.
+ * Both allocations are now asserted (previously only the per-bucket one
+ * was, leaving the memset to run on a possible NULL).
+ */
+#define HASHTABLE_INIT(table, type, field, _entries_size)		\
+	do {								\
+		hashtable_index_t var;					\
+		(table)->entries = (void *)malloc(			\
+			sizeof(*(table)->entries) * (_entries_size));	\
+		assert((table)->entries != NULL);			\
+		memset((table)->entries, 0,				\
+			sizeof(*(table)->entries) * (_entries_size));	\
+		(table)->entries_size = (_entries_size);		\
+		for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\
+			(table)->entries[var].field.capacity =		\
+				HASHTABLE_INITIAL_ENTRIES_CAPACITY;	\
+			(table)->entries[var].field.size = 0;		\
+			(table)->entries[var].field.values = (type *)malloc(\
+				sizeof(type) *				\
+				HASHTABLE_INITIAL_ENTRIES_CAPACITY);	\
+			assert((table)->entries[var].field.values != NULL);\
+		}							\
+	} while (0)
+
+/*
+ * All initialized hashtables should be destroyed with this macro.
+ * It frees every bucket's value array.
+ * NOTE(review): (table)->entries itself is not freed here — the caller
+ * appears to own that allocation; confirm at the call sites.
+ */
+#define HASHTABLE_DESTROY(table, field)					\
+	do {								\
+		hashtable_index_t var;					\
+		for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\
+			free((table)->entries[var].field.values);	\
+		}							\
+	} while (0)
+
+/* Returns the address of the bucket with the given hash value. */
+#define HASHTABLE_GET_ENTRY(table, hash) (&((table)->entries[hash]))
+
+/*
+ * Traverses through all hash table entries (buckets); var iterates
+ * over bucket pointers, not over stored elements.
+ */
+#define HASHTABLE_FOREACH(table, var)					\
+	for ((var) = &((table)->entries[0]);				\
+		(var) < &((table)->entries[HASHTABLE_ENTRIES_COUNT(table)]);\
+		++(var))
+
+/*
+ * Traverses through all elements of the specified hash table entry
+ */
+#define HASHTABLE_ENTRY_FOREACH(entry, field, var)			\
+	for ((var) = &((entry)->field.values[0]);			\
+		(var) < &((entry)->field.values[(entry)->field.size]);	\
+		++(var))
+
+/* Logically empties a bucket without releasing its storage. */
+#define HASHTABLE_ENTRY_CLEAR(entry, field)				\
+	((entry)->field.size = 0)
+
+/* Number of elements currently stored in the bucket. */
+#define HASHTABLE_ENTRY_SIZE(entry, field)				\
+	((entry)->field.size)
+
+/* Number of elements the bucket can hold before growing. */
+#define HASHTABLE_ENTRY_CAPACITY(entry, field)				\
+	((entry)->field.capacity)
+
+#define HASHTABLE_ENTRY_CAPACITY_INCREASE(entry, field, type) \
+ (entry)->field.capacity *= 2; \
+ (entry)->field.values = (type *)realloc((entry)->field.values, \
+ (entry)->field.capacity * sizeof(type));
+
+#define HASHTABLE_ENTRY_CAPACITY_DECREASE(entry, field, type) \
+ (entry)->field.capacity /= 2; \
+ (entry)->field.values = (type *)realloc((entry)->field.values, \
+ (entry)->field.capacity * sizeof(type));
+
+/*
+ * Generates prototypes for the hash table functions.  "name" is the
+ * table type made with HASHTABLE_HEAD, "entry_" the bucket type made
+ * with HASHTABLE_ENTRY_HEAD, and "type" the element type.
+ */
+#define HASHTABLE_PROTOTYPE(name, entry_, type) \
+hashtable_index_t name##_CALCULATE_HASH(struct name *, type *); \
+void name##_ENTRY_STORE(struct entry_*, type *); \
+type *name##_ENTRY_FIND(struct entry_*, type *); \
+type *name##_ENTRY_FIND_SPECIAL(struct entry_ *, type *, \
+	int (*) (const void *, const void *)); \
+void name##_ENTRY_REMOVE(struct entry_*, type *);
+
+/*
+ * Generates implementations of the hash table functions.  HASH maps
+ * (element, bucket count) to a bucket index; CMP is a qsort/bsearch
+ * style comparator.  STORE keeps each bucket's value array sorted so
+ * that FIND/FIND_SPECIAL can use bsearch; REMOVE compacts the array
+ * over the deleted element.
+ */
+#define HASHTABLE_GENERATE(name, entry_, type, field, HASH, CMP)	\
+hashtable_index_t name##_CALCULATE_HASH(struct name *table, type *data)	\
+{									\
+									\
+	return HASH(data, table->entries_size);				\
+}									\
+									\
+void name##_ENTRY_STORE(struct entry_ *the_entry, type *data)		\
+{									\
+									\
+	if (the_entry->field.size == the_entry->field.capacity)		\
+		HASHTABLE_ENTRY_CAPACITY_INCREASE(the_entry, field, type);\
+									\
+	memcpy(&(the_entry->field.values[the_entry->field.size++]),	\
+		data,							\
+		sizeof(type));						\
+	qsort(the_entry->field.values, the_entry->field.size,		\
+		sizeof(type), CMP);					\
+}									\
+									\
+type *name##_ENTRY_FIND(struct entry_ *the_entry, type *key)		\
+{									\
+									\
+	return ((type *)bsearch(key, the_entry->field.values,		\
+		the_entry->field.size, sizeof(type), CMP));		\
+}									\
+									\
+type *name##_ENTRY_FIND_SPECIAL(struct entry_ *the_entry, type *key,	\
+	int (*compar) (const void *, const void *))			\
+{									\
+	return ((type *)bsearch(key, the_entry->field.values,		\
+		the_entry->field.size, sizeof(type), compar));		\
+}									\
+									\
+void name##_ENTRY_REMOVE(struct entry_ *the_entry, type *del_elm)	\
+{									\
+									\
+	memmove(del_elm, del_elm + 1,					\
+		(&the_entry->field.values[--the_entry->field.size] - del_elm) *\
+		sizeof(type));						\
+}
+
+/*
+ * Macro definitions below wrap the functions, generated with the
+ * HASHTABLE_GENERATE macro. You should use them and avoid using the
+ * generated functions directly.
+ */
+#define HASHTABLE_CALCULATE_HASH(name, table, data)		\
+	(name##_CALCULATE_HASH((table), data))
+
+#define HASHTABLE_ENTRY_STORE(name, entry, data)		\
+	name##_ENTRY_STORE((entry), data)
+
+#define HASHTABLE_ENTRY_FIND(name, entry, key)			\
+	(name##_ENTRY_FIND((entry), (key)))
+
+#define HASHTABLE_ENTRY_FIND_SPECIAL(name, entry, key, cmp)	\
+	(name##_ENTRY_FIND_SPECIAL((entry), (key), (cmp)))
+
+#define HASHTABLE_ENTRY_REMOVE(name, entry, del_elm)		\
+	name##_ENTRY_REMOVE((entry), (del_elm))
+
+#endif
diff --git a/usr.sbin/nscd/log.c b/usr.sbin/nscd/log.c
new file mode 100644
index 0000000..053930a
--- /dev/null
+++ b/usr.sbin/nscd/log.c
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include "log.h"
+
+/*
+ * Formats and prints an informational message; level-0 messages are
+ * also forwarded to syslog(3) unless NO_SYSLOG is defined.
+ */
+void
+__log_msg(int level, const char *sender, const char *message, ...)
+{
+	va_list ap;
+	char *fmessage;
+	int res;
+
+	fmessage = NULL;
+	va_start(ap, message);
+	res = vasprintf(&fmessage, message, ap);
+	va_end(ap);
+	/*
+	 * On failure vasprintf() leaves the pointer undefined, so test
+	 * the return value instead of relying on an assert that is
+	 * compiled out under NDEBUG.
+	 */
+	if (res == -1)
+		return;
+	assert(fmessage != NULL);
+
+	printf("M%d from %s: %s\n", level, sender, fmessage);
+#ifndef NO_SYSLOG
+	if (level == 0)
+		syslog(LOG_INFO, "cached message (from %s): %s", sender,
+			fmessage);
+#endif
+	free(fmessage);
+}
+
+/*
+ * Formats and prints an error message; level-0 errors are also
+ * forwarded to syslog(3) unless NO_SYSLOG is defined.
+ */
+void
+__log_err(int level, const char *sender, const char *error, ...)
+{
+	va_list ap;
+	char *ferror;
+	int res;
+
+	ferror = NULL;
+	va_start(ap, error);
+	res = vasprintf(&ferror, error, ap);
+	va_end(ap);
+	/* See __log_msg(): the pointer is undefined when vasprintf fails. */
+	if (res == -1)
+		return;
+	assert(ferror != NULL);
+
+	printf("E%d from %s: %s\n", level, sender, ferror);
+
+#ifndef NO_SYSLOG
+	if (level == 0)
+		syslog(LOG_ERR, "cached error (from %s): %s", sender, ferror);
+#endif
+	free(ferror);
+}
diff --git a/usr.sbin/nscd/log.h b/usr.sbin/nscd/log.h
new file mode 100644
index 0000000..8d665a4
--- /dev/null
+++ b/usr.sbin/nscd/log.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_LOG_H__
+#define __CACHED_LOG_H__
+
+#define LOG_MSG_1(sender, msg, ...) __log_msg(1, sender, msg, ##__VA_ARGS__)
+#define LOG_MSG_2(sender, msg, ...) __log_msg(2, sender, msg, ##__VA_ARGS__)
+/* Fixed: previously passed the misspelled identifier "sedner", which
+ * would fail to compile at any LOG_MSG_3 use site. */
+#define LOG_MSG_3(sender, msg, ...) __log_msg(3, sender, msg, ##__VA_ARGS__)
+
+#define LOG_ERR_1(sender, err, ...) __log_err(1, sender, err, ##__VA_ARGS__)
+#define LOG_ERR_2(sender, err, ...) __log_err(2, sender, err, ##__VA_ARGS__)
+#define LOG_ERR_3(sender, err, ...) __log_err(3, sender, err, ##__VA_ARGS__)
+
+extern void __log_msg(int, const char *, const char *, ...);
+extern void __log_err(int, const char *, const char *, ...);
+
+#endif
diff --git a/usr.sbin/nscd/mp_rs_query. b/usr.sbin/nscd/mp_rs_query.
new file mode 100644
index 0000000..f468afa
--- /dev/null
+++ b/usr.sbin/nscd/mp_rs_query.
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_MP_RS_QUERY_H__
+#define __CACHED_MP_RS_QUERY_H__
+
+/*
+ * Forward declaration keeps the prototype below from declaring
+ * "struct query_state" with scope limited to the parameter list when
+ * this header is included before query.h.
+ */
+struct query_state;
+
+extern int on_mp_read_session_request_read1(struct query_state *);
+
+#endif
diff --git a/usr.sbin/nscd/mp_rs_query.c b/usr.sbin/nscd/mp_rs_query.c
new file mode 100644
index 0000000..ae0f3bb
--- /dev/null
+++ b/usr.sbin/nscd/mp_rs_query.c
@@ -0,0 +1,537 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "cachelib.h"
+#include "config.h"
+#include "debug.h"
+#include "log.h"
+#include "query.h"
+#include "mp_rs_query.h"
+#include "mp_ws_query.h"
+#include "singletons.h"
+
+static int on_mp_read_session_close_notification(struct query_state *);
+static void on_mp_read_session_destroy(struct query_state *);
+static int on_mp_read_session_mapper(struct query_state *);
+/* int on_mp_read_session_request_read1(struct query_state *); */
+static int on_mp_read_session_request_read2(struct query_state *);
+static int on_mp_read_session_request_process(struct query_state *);
+static int on_mp_read_session_response_write1(struct query_state *);
+static int on_mp_read_session_read_request_process(struct query_state *);
+static int on_mp_read_session_read_response_write1(struct query_state *);
+static int on_mp_read_session_read_response_write2(struct query_state *);
+
+/*
+ * This function is used as the query_state's destroy_func to make the
+ * proper cleanup in case of errors.
+ */
+static void
+on_mp_read_session_destroy(struct query_state *qstate)
+{
+	TRACE_IN(on_mp_read_session_destroy);
+	/* Free any partially built request/response elements. */
+	finalize_comm_element(&qstate->request);
+	finalize_comm_element(&qstate->response);
+
+	/*
+	 * mdata holds the open read session (set in
+	 * on_mp_read_session_request_process); close it under the
+	 * entry's multipart lock, as is done everywhere in this file.
+	 */
+	if (qstate->mdata != NULL) {
+		configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+		close_cache_mp_read_session(
+		    (cache_mp_read_session)qstate->mdata);
+		configuration_unlock_entry(qstate->config_entry,
+			CELT_MULTIPART);
+	}
+	TRACE_OUT(on_mp_read_session_destroy);
+}
+
+/*
+ * The functions below are used to process multipart read session initiation
+ * requests.
+ * - on_mp_read_session_request_read1 and on_mp_read_session_request_read2 read
+ * the request itself
+ * - on_mp_read_session_request_process processes it
+ * - on_mp_read_session_response_write1 sends the response
+ */
+int
+on_mp_read_session_request_read1(struct query_state *qstate)
+{
+	struct cache_mp_read_session_request *c_mp_rs_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_read_session_request_read1);
+	/*
+	 * First pass (watermark == 0): only arm the kevent to wait for
+	 * the entry name length.  Second pass: read the length, validate
+	 * it and allocate the NUL-terminated entry name buffer that
+	 * on_mp_read_session_request_read2 will fill.
+	 */
+	if (qstate->kevent_watermark == 0)
+		qstate->kevent_watermark = sizeof(size_t);
+	else {
+		init_comm_element(&qstate->request,
+		    CET_MP_READ_SESSION_REQUEST);
+		c_mp_rs_request = get_cache_mp_read_session_request(
+		    &qstate->request);
+
+		result = qstate->read_func(qstate,
+		    &c_mp_rs_request->entry_length, sizeof(size_t));
+
+		if (result != sizeof(size_t)) {
+			/*
+			 * Log the failure, as the write-session
+			 * counterpart (on_mp_write_session_request_read1)
+			 * does; previously this path failed silently.
+			 */
+			LOG_ERR_3("on_mp_read_session_request_read1",
+				"read failed");
+			TRACE_OUT(on_mp_read_session_request_read1);
+			return (-1);
+		}
+
+		if (BUFSIZE_INVALID(c_mp_rs_request->entry_length)) {
+			LOG_ERR_3("on_mp_read_session_request_read1",
+				"invalid entry_length value");
+			TRACE_OUT(on_mp_read_session_request_read1);
+			return (-1);
+		}
+
+		c_mp_rs_request->entry = (char *)malloc(
+		    c_mp_rs_request->entry_length + 1);
+		assert(c_mp_rs_request->entry != NULL);
+		memset(c_mp_rs_request->entry, 0,
+		    c_mp_rs_request->entry_length + 1);
+
+		qstate->kevent_watermark = c_mp_rs_request->entry_length;
+		qstate->process_func = on_mp_read_session_request_read2;
+	}
+	TRACE_OUT(on_mp_read_session_request_read1);
+	return (0);
+}
+
+/*
+ * Reads the entry name itself (the buffer was sized and allocated by
+ * on_mp_read_session_request_read1) and hands off to the processing stage.
+ */
+static int
+on_mp_read_session_request_read2(struct query_state *qstate)
+{
+	struct cache_mp_read_session_request *c_mp_rs_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_read_session_request_read2);
+	c_mp_rs_request = get_cache_mp_read_session_request(&qstate->request);
+
+	result = qstate->read_func(qstate, c_mp_rs_request->entry,
+	    c_mp_rs_request->entry_length);
+
+	if (result != qstate->kevent_watermark) {
+		LOG_ERR_3("on_mp_read_session_request_read2",
+			"read failed");
+		TRACE_OUT(on_mp_read_session_request_read2);
+		return (-1);
+	}
+
+	qstate->kevent_watermark = 0;
+	qstate->process_func = on_mp_read_session_request_process;
+	TRACE_OUT(on_mp_read_session_request_read2);
+	return (0);
+}
+
+/*
+ * Looks up the configuration entry named in the request, finds (or, for
+ * entries that perform actual lookups, registers) the decorated multipart
+ * cache entry and opens a read session on it.  For perform-actual-lookups
+ * entries with no open session, the multipart agent is invoked to write a
+ * fresh snapshot into the cache first.  Always falls through to
+ * on_mp_read_session_response_write1 with error_code set accordingly.
+ */
+static int
+on_mp_read_session_request_process(struct query_state *qstate)
+{
+	struct cache_mp_read_session_request *c_mp_rs_request;
+	struct cache_mp_read_session_response *c_mp_rs_response;
+	cache_mp_read_session rs;
+	cache_entry c_entry;
+	char *dec_cache_entry_name;
+
+	char *buffer;
+	size_t buffer_size;
+	cache_mp_write_session ws;
+	struct agent *lookup_agent;
+	struct multipart_agent *mp_agent;
+	void *mdata;
+	int res;
+
+	TRACE_IN(on_mp_read_session_request_process);
+	init_comm_element(&qstate->response, CET_MP_READ_SESSION_RESPONSE);
+	c_mp_rs_response = get_cache_mp_read_session_response(
+	    &qstate->response);
+	c_mp_rs_request = get_cache_mp_read_session_request(&qstate->request);
+
+	qstate->config_entry = configuration_find_entry(
+	    s_configuration, c_mp_rs_request->entry);
+	if (qstate->config_entry == NULL) {
+		c_mp_rs_response->error_code = ENOENT;
+
+		LOG_ERR_2("read_session_request",
+			"can't find configuration entry '%s'."
+			" aborting request", c_mp_rs_request->entry);
+		goto fin;
+	}
+
+	if (qstate->config_entry->enabled == 0) {
+		c_mp_rs_response->error_code = EACCES;
+
+		LOG_ERR_2("read_session_request",
+			"configuration entry '%s' is disabled",
+			c_mp_rs_request->entry);
+		goto fin;
+	}
+
+	/*
+	 * Entries that perform actual lookups use the undecorated cache
+	 * entry name; otherwise the name is decorated with the peer's
+	 * eid string (see mp_ws_query.c for the same convention).
+	 */
+	if (qstate->config_entry->perform_actual_lookups != 0)
+		dec_cache_entry_name = strdup(
+		    qstate->config_entry->mp_cache_params.entry_name);
+	else {
+#ifdef NS_CACHED_EID_CHECKING
+		if (check_query_eids(qstate) != 0) {
+			c_mp_rs_response->error_code = EPERM;
+			goto fin;
+		}
+#endif
+
+		asprintf(&dec_cache_entry_name, "%s%s", qstate->eid_str,
+		    qstate->config_entry->mp_cache_params.entry_name);
+	}
+
+	assert(dec_cache_entry_name != NULL);
+
+	configuration_lock_rdlock(s_configuration);
+	c_entry = find_cache_entry(s_cache, dec_cache_entry_name);
+	configuration_unlock(s_configuration);
+
+	/*
+	 * Fixed: compare against INVALID_CACHE_ENTRY (the cache_entry
+	 * sentinel used everywhere else in this function), not
+	 * INVALID_CACHE (the cache handle sentinel).
+	 */
+	if ((c_entry == INVALID_CACHE_ENTRY) &&
+	    (qstate->config_entry->perform_actual_lookups != 0))
+		c_entry = register_new_mp_cache_entry(qstate,
+		    dec_cache_entry_name);
+
+	free(dec_cache_entry_name);
+
+	if (c_entry != INVALID_CACHE_ENTRY) {
+		configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+		rs = open_cache_mp_read_session(c_entry);
+		configuration_unlock_entry(qstate->config_entry,
+			CELT_MULTIPART);
+
+		if ((rs == INVALID_CACHE_MP_READ_SESSION) &&
+		    (qstate->config_entry->perform_actual_lookups != 0)) {
+			lookup_agent = find_agent(s_agent_table,
+			    c_mp_rs_request->entry, MULTIPART_AGENT);
+
+			if ((lookup_agent != NULL) &&
+			    (lookup_agent->type == MULTIPART_AGENT)) {
+				mp_agent = (struct multipart_agent *)
+				    lookup_agent;
+				mdata = mp_agent->mp_init_func();
+
+				/*
+				 * Multipart agents read the whole snapshot
+				 * of the data at one time.
+				 */
+				configuration_lock_entry(qstate->config_entry,
+				    CELT_MULTIPART);
+				ws = open_cache_mp_write_session(c_entry);
+				configuration_unlock_entry(qstate->config_entry,
+				    CELT_MULTIPART);
+				if (ws != NULL) {
+					do {
+						buffer = NULL;
+						res = mp_agent->mp_lookup_func(&buffer,
+						    &buffer_size,
+						    mdata);
+
+						if ((res & NS_TERMINATE) &&
+						    (buffer != NULL)) {
+							configuration_lock_entry(
+							    qstate->config_entry,
+							    CELT_MULTIPART);
+							if (cache_mp_write(ws, buffer,
+							    buffer_size) != 0) {
+								abandon_cache_mp_write_session(ws);
+								ws = NULL;
+							}
+							configuration_unlock_entry(
+							    qstate->config_entry,
+							    CELT_MULTIPART);
+
+							free(buffer);
+							buffer = NULL;
+						} else {
+							configuration_lock_entry(
+							    qstate->config_entry,
+							    CELT_MULTIPART);
+							close_cache_mp_write_session(ws);
+							configuration_unlock_entry(
+							    qstate->config_entry,
+							    CELT_MULTIPART);
+
+							free(buffer);
+							buffer = NULL;
+						}
+					} while ((res & NS_TERMINATE) &&
+					    (ws != NULL));
+				}
+
+				configuration_lock_entry(qstate->config_entry,
+				    CELT_MULTIPART);
+				rs = open_cache_mp_read_session(c_entry);
+				configuration_unlock_entry(qstate->config_entry,
+				    CELT_MULTIPART);
+			}
+		}
+
+		if (rs == INVALID_CACHE_MP_READ_SESSION)
+			c_mp_rs_response->error_code = -1;
+		else {
+			qstate->mdata = rs;
+			qstate->destroy_func = on_mp_read_session_destroy;
+
+			configuration_lock_entry(qstate->config_entry,
+			    CELT_MULTIPART);
+			if ((qstate->config_entry->mp_query_timeout.tv_sec != 0) ||
+			    (qstate->config_entry->mp_query_timeout.tv_usec != 0))
+				memcpy(&qstate->timeout,
+				    &qstate->config_entry->mp_query_timeout,
+				    sizeof(struct timeval));
+			configuration_unlock_entry(qstate->config_entry,
+			    CELT_MULTIPART);
+		}
+	} else
+		c_mp_rs_response->error_code = -1;
+
+fin:
+	qstate->process_func = on_mp_read_session_response_write1;
+	qstate->kevent_watermark = sizeof(int);
+	qstate->kevent_filter = EVFILT_WRITE;
+
+	TRACE_OUT(on_mp_read_session_request_process);
+	return (0);
+}
+
+/*
+ * Writes the session-initiation result (an int error code) back to the
+ * client.  On success the connection stays open and the mapper waits for
+ * the next request; on failure the query terminates (process_func = NULL).
+ */
+static int
+on_mp_read_session_response_write1(struct query_state *qstate)
+{
+	struct cache_mp_read_session_response *c_mp_rs_response;
+	ssize_t result;
+
+	TRACE_IN(on_mp_read_session_response_write1);
+	c_mp_rs_response = get_cache_mp_read_session_response(
+	    &qstate->response);
+	result = qstate->write_func(qstate, &c_mp_rs_response->error_code,
+	    sizeof(int));
+
+	if (result != sizeof(int)) {
+		LOG_ERR_3("on_mp_read_session_response_write1",
+			"write failed");
+		TRACE_OUT(on_mp_read_session_response_write1);
+		return (-1);
+	}
+
+	if (c_mp_rs_response->error_code == 0) {
+		qstate->kevent_watermark = sizeof(int);
+		qstate->process_func = on_mp_read_session_mapper;
+		qstate->kevent_filter = EVFILT_READ;
+	} else {
+		qstate->kevent_watermark = 0;
+		qstate->process_func = NULL;
+	}
+	TRACE_OUT(on_mp_read_session_response_write1);
+	return (0);
+}
+
+/*
+ * Mapper function is used to avoid multiple connections for each session
+ * write or read requests. After processing the request, it does not close
+ * the connection, but waits for the next request.
+ */
+static int
+on_mp_read_session_mapper(struct query_state *qstate)
+{
+	ssize_t result;
+	int elem_type;
+
+	TRACE_IN(on_mp_read_session_mapper);
+	/*
+	 * Two-pass pattern: first arm the kevent for an int-sized element
+	 * type, then read it and dispatch to the matching handler.
+	 */
+	if (qstate->kevent_watermark == 0) {
+		qstate->kevent_watermark = sizeof(int);
+	} else {
+		result = qstate->read_func(qstate, &elem_type, sizeof(int));
+		if (result != sizeof(int)) {
+			LOG_ERR_3("on_mp_read_session_mapper",
+				"read failed");
+			TRACE_OUT(on_mp_read_session_mapper);
+			return (-1);
+		}
+
+		switch (elem_type) {
+		case CET_MP_READ_SESSION_READ_REQUEST:
+			qstate->kevent_watermark = 0;
+			qstate->process_func =
+				on_mp_read_session_read_request_process;
+			break;
+		case CET_MP_READ_SESSION_CLOSE_NOTIFICATION:
+			qstate->kevent_watermark = 0;
+			qstate->process_func =
+				on_mp_read_session_close_notification;
+			break;
+		default:
+			/* Unknown element type terminates the query. */
+			qstate->kevent_watermark = 0;
+			qstate->process_func = NULL;
+			LOG_ERR_3("on_mp_read_session_mapper",
+				"unknown element type");
+			TRACE_OUT(on_mp_read_session_mapper);
+			return (-1);
+		}
+	}
+	TRACE_OUT(on_mp_read_session_mapper);
+	return (0);
+}
+
+/*
+ * The functions below are used to process multipart read sessions read
+ * requests. User doesn't have to pass any kind of data, besides the
+ * request identificator itself. So we don't need any XXX_read functions and
+ * start with the XXX_process function.
+ * - on_mp_read_session_read_request_process processes it
+ * - on_mp_read_session_read_response_write1 and
+ * on_mp_read_session_read_response_write2 sends the response
+ */
+/*
+ * Serves one read request on an open session: queries the size of the
+ * next data portion, allocates a buffer and reads it, then schedules the
+ * response writer.  Watermark covers data_size only when there is data.
+ */
+static int
+on_mp_read_session_read_request_process(struct query_state *qstate)
+{
+	struct cache_mp_read_session_read_response *read_response;
+
+	/* Fixed: TRACE markers previously named a nonexistent function. */
+	TRACE_IN(on_mp_read_session_read_request_process);
+	init_comm_element(&qstate->response, CET_MP_READ_SESSION_READ_RESPONSE);
+	read_response = get_cache_mp_read_session_read_response(
+	    &qstate->response);
+
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	/* First call with a NULL buffer only retrieves the data size. */
+	read_response->error_code = cache_mp_read(
+	    (cache_mp_read_session)qstate->mdata, NULL,
+	    &read_response->data_size);
+
+	if (read_response->error_code == 0) {
+		read_response->data = (char *)malloc(read_response->data_size);
+		/*
+		 * Fixed: the assertion checked read_response itself
+		 * (always non-NULL here) instead of the freshly
+		 * malloc'ed data buffer.
+		 */
+		assert(read_response->data != NULL);
+		read_response->error_code = cache_mp_read(
+		    (cache_mp_read_session)qstate->mdata,
+		    read_response->data,
+		    &read_response->data_size);
+	}
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+
+	if (read_response->error_code == 0)
+		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
+	else
+		qstate->kevent_watermark = sizeof(int);
+	qstate->process_func = on_mp_read_session_read_response_write1;
+	qstate->kevent_filter = EVFILT_WRITE;
+
+	TRACE_OUT(on_mp_read_session_read_request_process);
+	return (0);
+}
+
+/*
+ * Writes the read-response header: the error code, followed by data_size
+ * when there is data to send (the watermark set by the previous stage
+ * already accounts for both cases).  On success with data, hands off to
+ * _write2 to send the payload; otherwise the query terminates.
+ */
+static int
+on_mp_read_session_read_response_write1(struct query_state *qstate)
+{
+	struct cache_mp_read_session_read_response *read_response;
+	ssize_t result;
+
+	TRACE_IN(on_mp_read_session_read_response_write1);
+	read_response = get_cache_mp_read_session_read_response(
+	    &qstate->response);
+
+	result = qstate->write_func(qstate, &read_response->error_code,
+	    sizeof(int));
+	if (read_response->error_code == 0) {
+		result += qstate->write_func(qstate, &read_response->data_size,
+		    sizeof(size_t));
+		if (result != qstate->kevent_watermark) {
+			TRACE_OUT(on_mp_read_session_read_response_write1);
+			LOG_ERR_3("on_mp_read_session_read_response_write1",
+				"write failed");
+			return (-1);
+		}
+
+		qstate->kevent_watermark = read_response->data_size;
+		qstate->process_func = on_mp_read_session_read_response_write2;
+	} else {
+		if (result != qstate->kevent_watermark) {
+			LOG_ERR_3("on_mp_read_session_read_response_write1",
+				"write failed");
+			TRACE_OUT(on_mp_read_session_read_response_write1);
+			return (-1);
+		}
+
+		qstate->kevent_watermark = 0;
+		qstate->process_func = NULL;
+	}
+
+	TRACE_OUT(on_mp_read_session_read_response_write1);
+	return (0);
+}
+
+/*
+ * Writes the data payload itself, then recycles the request/response
+ * elements and returns control to the mapper for the next request.
+ */
+static int
+on_mp_read_session_read_response_write2(struct query_state *qstate)
+{
+	struct cache_mp_read_session_read_response *read_response;
+	ssize_t result;
+
+	TRACE_IN(on_mp_read_session_read_response_write2);
+	read_response = get_cache_mp_read_session_read_response(
+	    &qstate->response);
+	result = qstate->write_func(qstate, read_response->data,
+	    read_response->data_size);
+	if (result != qstate->kevent_watermark) {
+		LOG_ERR_3("on_mp_read_session_read_response_write2",
+			"write failed");
+		TRACE_OUT(on_mp_read_session_read_response_write2);
+		return (-1);
+	}
+
+	finalize_comm_element(&qstate->request);
+	finalize_comm_element(&qstate->response);
+
+	qstate->kevent_watermark = sizeof(int);
+	qstate->process_func = on_mp_read_session_mapper;
+	qstate->kevent_filter = EVFILT_READ;
+
+	TRACE_OUT(on_mp_read_session_read_response_write2);
+	return (0);
+}
+
+/*
+ * Handles session close notification by calling close_cache_mp_read_session
+ * function.
+ */
+static int
+on_mp_read_session_close_notification(struct query_state *qstate)
+{
+
+	TRACE_IN(on_mp_read_session_close_notification);
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	close_cache_mp_read_session((cache_mp_read_session)qstate->mdata);
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+	/* Clear mdata so the destroy_func does not close the session twice. */
+	qstate->mdata = NULL;
+	qstate->kevent_watermark = 0;
+	qstate->process_func = NULL;
+	TRACE_OUT(on_mp_read_session_close_notification);
+	return (0);
+}
diff --git a/usr.sbin/nscd/mp_rs_query.h b/usr.sbin/nscd/mp_rs_query.h
new file mode 100644
index 0000000..f468afa
--- /dev/null
+++ b/usr.sbin/nscd/mp_rs_query.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_MP_RS_QUERY_H__
+#define __CACHED_MP_RS_QUERY_H__
+
+extern int on_mp_read_session_request_read1(struct query_state *);
+
+#endif
diff --git a/usr.sbin/nscd/mp_ws_query.c b/usr.sbin/nscd/mp_ws_query.c
new file mode 100644
index 0000000..d7aeb49
--- /dev/null
+++ b/usr.sbin/nscd/mp_ws_query.c
@@ -0,0 +1,548 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "cachelib.h"
+#include "config.h"
+#include "debug.h"
+#include "log.h"
+#include "query.h"
+#include "mp_ws_query.h"
+#include "singletons.h"
+
+static int on_mp_write_session_abandon_notification(struct query_state *);
+static int on_mp_write_session_close_notification(struct query_state *);
+static void on_mp_write_session_destroy(struct query_state *);
+static int on_mp_write_session_mapper(struct query_state *);
+/* int on_mp_write_session_request_read1(struct query_state *); */
+static int on_mp_write_session_request_read2(struct query_state *);
+static int on_mp_write_session_request_process(struct query_state *);
+static int on_mp_write_session_response_write1(struct query_state *);
+static int on_mp_write_session_write_request_read1(struct query_state *);
+static int on_mp_write_session_write_request_read2(struct query_state *);
+static int on_mp_write_session_write_request_process(struct query_state *);
+static int on_mp_write_session_write_response_write1(struct query_state *);
+
+/*
+ * This function is used as the query_state's destroy_func to make the
+ * proper cleanup in case of errors.
+ */
+static void
+on_mp_write_session_destroy(struct query_state *qstate)
+{
+
+	TRACE_IN(on_mp_write_session_destroy);
+	/* Free any partially built request/response elements. */
+	finalize_comm_element(&qstate->request);
+	finalize_comm_element(&qstate->response);
+
+	/*
+	 * A write session that was never closed is abandoned (discarded),
+	 * not committed; done under the entry's multipart lock.
+	 */
+	if (qstate->mdata != NULL) {
+		configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+		abandon_cache_mp_write_session(
+		    (cache_mp_write_session)qstate->mdata);
+		configuration_unlock_entry(qstate->config_entry,
+			CELT_MULTIPART);
+	}
+	TRACE_OUT(on_mp_write_session_destroy);
+}
+
+/*
+ * The functions below are used to process multipart write session initiation
+ * requests.
+ * - on_mp_write_session_request_read1 and on_mp_write_session_request_read2
+ * read the request itself
+ * - on_mp_write_session_request_process processes it
+ * - on_mp_write_session_response_write1 sends the response
+ */
+int
+on_mp_write_session_request_read1(struct query_state *qstate)
+{
+	struct cache_mp_write_session_request *c_mp_ws_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_request_read1);
+	/*
+	 * First pass (watermark == 0): only arm the kevent to wait for
+	 * the entry name length.  Second pass: read and validate the
+	 * length and allocate the NUL-terminated entry name buffer.
+	 */
+	if (qstate->kevent_watermark == 0)
+		qstate->kevent_watermark = sizeof(size_t);
+	else {
+		init_comm_element(&qstate->request,
+		    CET_MP_WRITE_SESSION_REQUEST);
+		c_mp_ws_request = get_cache_mp_write_session_request(
+		    &qstate->request);
+
+		result = qstate->read_func(qstate,
+		    &c_mp_ws_request->entry_length, sizeof(size_t));
+
+		if (result != sizeof(size_t)) {
+			LOG_ERR_3("on_mp_write_session_request_read1",
+				"read failed");
+			TRACE_OUT(on_mp_write_session_request_read1);
+			return (-1);
+		}
+
+		if (BUFSIZE_INVALID(c_mp_ws_request->entry_length)) {
+			LOG_ERR_3("on_mp_write_session_request_read1",
+				"invalid entry_length value");
+			TRACE_OUT(on_mp_write_session_request_read1);
+			return (-1);
+		}
+
+		c_mp_ws_request->entry = (char *)malloc(
+		    c_mp_ws_request->entry_length + 1);
+		assert(c_mp_ws_request->entry != NULL);
+		memset(c_mp_ws_request->entry, 0,
+		    c_mp_ws_request->entry_length + 1);
+
+		qstate->kevent_watermark = c_mp_ws_request->entry_length;
+		qstate->process_func = on_mp_write_session_request_read2;
+	}
+	TRACE_OUT(on_mp_write_session_request_read1);
+	return (0);
+}
+
+/*
+ * Reads the entry name itself (buffer allocated by _read1) and hands
+ * off to the processing stage.
+ */
+static int
+on_mp_write_session_request_read2(struct query_state *qstate)
+{
+	struct cache_mp_write_session_request *c_mp_ws_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_request_read2);
+	c_mp_ws_request = get_cache_mp_write_session_request(&qstate->request);
+
+	result = qstate->read_func(qstate, c_mp_ws_request->entry,
+	    c_mp_ws_request->entry_length);
+
+	if (result != qstate->kevent_watermark) {
+		LOG_ERR_3("on_mp_write_session_request_read2",
+			"read failed");
+		TRACE_OUT(on_mp_write_session_request_read2);
+		return (-1);
+	}
+
+	qstate->kevent_watermark = 0;
+	qstate->process_func = on_mp_write_session_request_process;
+
+	TRACE_OUT(on_mp_write_session_request_read2);
+	return (0);
+}
+
+/*
+ * Validates the named configuration entry (must exist, be enabled, and
+ * not perform its own lookups), finds or registers the eid-decorated
+ * multipart cache entry, and opens a write session on it.  Always falls
+ * through to on_mp_write_session_response_write1 with error_code set.
+ */
+static int
+on_mp_write_session_request_process(struct query_state *qstate)
+{
+	struct cache_mp_write_session_request *c_mp_ws_request;
+	struct cache_mp_write_session_response *c_mp_ws_response;
+	cache_mp_write_session ws;
+	cache_entry c_entry;
+	char *dec_cache_entry_name;
+
+	TRACE_IN(on_mp_write_session_request_process);
+	init_comm_element(&qstate->response, CET_MP_WRITE_SESSION_RESPONSE);
+	c_mp_ws_response = get_cache_mp_write_session_response(
+	    &qstate->response);
+	c_mp_ws_request = get_cache_mp_write_session_request(&qstate->request);
+
+	qstate->config_entry = configuration_find_entry(
+	    s_configuration, c_mp_ws_request->entry);
+	if (qstate->config_entry == NULL) {
+		c_mp_ws_response->error_code = ENOENT;
+
+		LOG_ERR_2("write_session_request",
+			"can't find configuration entry '%s'. "
+			"aborting request", c_mp_ws_request->entry);
+		goto fin;
+	}
+
+	if (qstate->config_entry->enabled == 0) {
+		c_mp_ws_response->error_code = EACCES;
+
+		LOG_ERR_2("write_session_request",
+			"configuration entry '%s' is disabled",
+			c_mp_ws_request->entry);
+		goto fin;
+	}
+
+	/* Self-lookup entries are populated by agents, not by clients. */
+	if (qstate->config_entry->perform_actual_lookups != 0) {
+		c_mp_ws_response->error_code = EOPNOTSUPP;
+
+		LOG_ERR_2("write_session_request",
+			"entry '%s' performs lookups by itself: "
+			"can't write to it", c_mp_ws_request->entry);
+		goto fin;
+	} else {
+#ifdef NS_CACHED_EID_CHECKING
+		if (check_query_eids(qstate) != 0) {
+			c_mp_ws_response->error_code = EPERM;
+			goto fin;
+		}
+#endif
+	}
+
+	/*
+	 * All multipart entries are separated by their name decorations.
+	 * For one configuration entry there will be a lot of multipart
+	 * cache entries - each with its own decorated name.
+	 */
+	asprintf(&dec_cache_entry_name, "%s%s", qstate->eid_str,
+	    qstate->config_entry->mp_cache_params.entry_name);
+	assert(dec_cache_entry_name != NULL);
+
+	configuration_lock_rdlock(s_configuration);
+	c_entry = find_cache_entry(s_cache,
+	    dec_cache_entry_name);
+	configuration_unlock(s_configuration);
+
+	if (c_entry == INVALID_CACHE_ENTRY)
+		c_entry = register_new_mp_cache_entry(qstate,
+		    dec_cache_entry_name);
+
+	free(dec_cache_entry_name);
+
+	assert(c_entry != NULL);
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	ws = open_cache_mp_write_session(c_entry);
+	if (ws == INVALID_CACHE_MP_WRITE_SESSION)
+		c_mp_ws_response->error_code = -1;
+	else {
+		/* Session handle lives in mdata until close/abandon. */
+		qstate->mdata = ws;
+		qstate->destroy_func = on_mp_write_session_destroy;
+
+		if ((qstate->config_entry->mp_query_timeout.tv_sec != 0) ||
+		    (qstate->config_entry->mp_query_timeout.tv_usec != 0))
+			memcpy(&qstate->timeout,
+			    &qstate->config_entry->mp_query_timeout,
+			    sizeof(struct timeval));
+	}
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+
+fin:
+	qstate->process_func = on_mp_write_session_response_write1;
+	qstate->kevent_watermark = sizeof(int);
+	qstate->kevent_filter = EVFILT_WRITE;
+
+	TRACE_OUT(on_mp_write_session_request_process);
+	return (0);
+}
+
+/*
+ * Writes the session-initiation result back to the client.  On success
+ * the connection stays open and the mapper waits for the next request;
+ * on failure the query terminates (process_func = NULL).
+ */
+static int
+on_mp_write_session_response_write1(struct query_state *qstate)
+{
+	struct cache_mp_write_session_response *c_mp_ws_response;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_response_write1);
+	c_mp_ws_response = get_cache_mp_write_session_response(
+	    &qstate->response);
+	result = qstate->write_func(qstate, &c_mp_ws_response->error_code,
+	    sizeof(int));
+	if (result != sizeof(int)) {
+		LOG_ERR_3("on_mp_write_session_response_write1",
+			"write failed");
+		TRACE_OUT(on_mp_write_session_response_write1);
+		return (-1);
+	}
+
+	if (c_mp_ws_response->error_code == 0) {
+		qstate->kevent_watermark = sizeof(int);
+		qstate->process_func = on_mp_write_session_mapper;
+		qstate->kevent_filter = EVFILT_READ;
+	} else {
+		qstate->kevent_watermark = 0;
+		qstate->process_func = NULL;
+	}
+	TRACE_OUT(on_mp_write_session_response_write1);
+	return (0);
+}
+
+/*
+ * Mapper function is used to avoid multiple connections for each session
+ * write or read requests. After processing the request, it does not close
+ * the connection, but waits for the next request.
+ */
+static int
+on_mp_write_session_mapper(struct query_state *qstate)
+{
+	ssize_t result;
+	int elem_type;
+
+	TRACE_IN(on_mp_write_session_mapper);
+	/*
+	 * Two-pass pattern: first arm the kevent for an int-sized element
+	 * type, then read it and dispatch to the matching handler.
+	 */
+	if (qstate->kevent_watermark == 0) {
+		qstate->kevent_watermark = sizeof(int);
+	} else {
+		result = qstate->read_func(qstate, &elem_type, sizeof(int));
+		if (result != sizeof(int)) {
+			LOG_ERR_3("on_mp_write_session_mapper",
+				"read failed");
+			TRACE_OUT(on_mp_write_session_mapper);
+			return (-1);
+		}
+
+		switch (elem_type) {
+		case CET_MP_WRITE_SESSION_WRITE_REQUEST:
+			/* Write requests carry a size_t data_size first. */
+			qstate->kevent_watermark = sizeof(size_t);
+			qstate->process_func =
+				on_mp_write_session_write_request_read1;
+			break;
+		case CET_MP_WRITE_SESSION_ABANDON_NOTIFICATION:
+			qstate->kevent_watermark = 0;
+			qstate->process_func =
+				on_mp_write_session_abandon_notification;
+			break;
+		case CET_MP_WRITE_SESSION_CLOSE_NOTIFICATION:
+			qstate->kevent_watermark = 0;
+			qstate->process_func =
+				on_mp_write_session_close_notification;
+			break;
+		default:
+			/* Unknown element type terminates the query. */
+			qstate->kevent_watermark = 0;
+			qstate->process_func = NULL;
+			LOG_ERR_2("on_mp_write_session_mapper",
+				"unknown element type");
+			TRACE_OUT(on_mp_write_session_mapper);
+			return (-1);
+		}
+	}
+	TRACE_OUT(on_mp_write_session_mapper);
+	return (0);
+}
+
+/*
+ * The functions below are used to process multipart write sessions write
+ * requests.
+ * - on_mp_write_session_write_request_read1 and
+ * on_mp_write_session_write_request_read2 read the request itself
+ * - on_mp_write_session_write_request_process processes it
+ * - on_mp_write_session_write_response_write1 sends the response
+ */
+static int
+on_mp_write_session_write_request_read1(struct query_state *qstate)
+{
+	struct cache_mp_write_session_write_request *write_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_write_request_read1);
+	init_comm_element(&qstate->request,
+	    CET_MP_WRITE_SESSION_WRITE_REQUEST);
+	write_request = get_cache_mp_write_session_write_request(
+	    &qstate->request);
+
+	/* Read the payload size (the mapper armed the watermark for it). */
+	result = qstate->read_func(qstate, &write_request->data_size,
+	    sizeof(size_t));
+
+	if (result != sizeof(size_t)) {
+		LOG_ERR_3("on_mp_write_session_write_request_read1",
+			"read failed");
+		TRACE_OUT(on_mp_write_session_write_request_read1);
+		return (-1);
+	}
+
+	/* Reject absurd sizes before allocating. */
+	if (BUFSIZE_INVALID(write_request->data_size)) {
+		LOG_ERR_3("on_mp_write_session_write_request_read1",
+			"invalid data_size value");
+		TRACE_OUT(on_mp_write_session_write_request_read1);
+		return (-1);
+	}
+
+	write_request->data = (char *)malloc(write_request->data_size);
+	assert(write_request->data != NULL);
+	memset(write_request->data, 0, write_request->data_size);
+
+	qstate->kevent_watermark = write_request->data_size;
+	qstate->process_func = on_mp_write_session_write_request_read2;
+	TRACE_OUT(on_mp_write_session_write_request_read1);
+	return (0);
+}
+
+/*
+ * Reads the payload itself (buffer allocated by _read1) and hands off
+ * to the processing stage.
+ */
+static int
+on_mp_write_session_write_request_read2(struct query_state *qstate)
+{
+	struct cache_mp_write_session_write_request *write_request;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_write_request_read2);
+	write_request = get_cache_mp_write_session_write_request(
+	    &qstate->request);
+
+	result = qstate->read_func(qstate, write_request->data,
+	    write_request->data_size);
+
+	if (result != qstate->kevent_watermark) {
+		LOG_ERR_3("on_mp_write_session_write_request_read2",
+			"read failed");
+		TRACE_OUT(on_mp_write_session_write_request_read2);
+		return (-1);
+	}
+
+	qstate->kevent_watermark = 0;
+	qstate->process_func = on_mp_write_session_write_request_process;
+	TRACE_OUT(on_mp_write_session_write_request_read2);
+	return (0);
+}
+
+/*
+ * Appends the received payload to the open write session (stored in
+ * qstate->mdata) under the entry's multipart lock, then schedules the
+ * response writer with the cache_mp_write() result as the error code.
+ */
+static int
+on_mp_write_session_write_request_process(struct query_state *qstate)
+{
+	struct cache_mp_write_session_write_request *write_request;
+	struct cache_mp_write_session_write_response *write_response;
+
+	TRACE_IN(on_mp_write_session_write_request_process);
+	init_comm_element(&qstate->response,
+	    CET_MP_WRITE_SESSION_WRITE_RESPONSE);
+	write_response = get_cache_mp_write_session_write_response(
+	    &qstate->response);
+	write_request = get_cache_mp_write_session_write_request(
+	    &qstate->request);
+
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	write_response->error_code = cache_mp_write(
+	    (cache_mp_write_session)qstate->mdata,
+	    write_request->data,
+	    write_request->data_size);
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+
+	qstate->kevent_watermark = sizeof(int);
+	qstate->process_func = on_mp_write_session_write_response_write1;
+	qstate->kevent_filter = EVFILT_WRITE;
+
+	TRACE_OUT(on_mp_write_session_write_request_process);
+	return (0);
+}
+
+/*
+ * Writes the per-write result back to the client.  On success the
+ * request/response elements are recycled and the mapper waits for the
+ * next request; on failure the query terminates.
+ */
+static int
+on_mp_write_session_write_response_write1(struct query_state *qstate)
+{
+	struct cache_mp_write_session_write_response *write_response;
+	ssize_t result;
+
+	TRACE_IN(on_mp_write_session_write_response_write1);
+	write_response = get_cache_mp_write_session_write_response(
+	    &qstate->response);
+	result = qstate->write_func(qstate, &write_response->error_code,
+	    sizeof(int));
+	if (result != sizeof(int)) {
+		LOG_ERR_3("on_mp_write_session_write_response_write1",
+			"write failed");
+		TRACE_OUT(on_mp_write_session_write_response_write1);
+		return (-1);
+	}
+
+	if (write_response->error_code == 0) {
+		finalize_comm_element(&qstate->request);
+		finalize_comm_element(&qstate->response);
+
+		qstate->kevent_watermark = sizeof(int);
+		qstate->process_func = on_mp_write_session_mapper;
+		qstate->kevent_filter = EVFILT_READ;
+	} else {
+		qstate->kevent_watermark = 0;
+		/*
+		 * Fixed: was "= 0" — use NULL for the function pointer,
+		 * as everywhere else in this file.
+		 */
+		qstate->process_func = NULL;
+	}
+
+	TRACE_OUT(on_mp_write_session_write_response_write1);
+	return (0);
+}
+
+/*
+ * Handles abandon notifications. Destroys the session by calling the
+ * abandon_cache_mp_write_session.
+ */
+static int
+on_mp_write_session_abandon_notification(struct query_state *qstate)
+{
+	TRACE_IN(on_mp_write_session_abandon_notification);
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	abandon_cache_mp_write_session((cache_mp_write_session)qstate->mdata);
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+	/* Invalidate mdata so destroy_func does not abandon it twice. */
+	qstate->mdata = INVALID_CACHE_MP_WRITE_SESSION;
+
+	qstate->kevent_watermark = 0;
+	qstate->process_func = NULL;
+	TRACE_OUT(on_mp_write_session_abandon_notification);
+	return (0);
+}
+
+/*
+ * Handles close notifications. Commits the session by calling
+ * the close_cache_mp_write_session.
+ */
+static int
+on_mp_write_session_close_notification(struct query_state *qstate)
+{
+	TRACE_IN(on_mp_write_session_close_notification);
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+	close_cache_mp_write_session((cache_mp_write_session)qstate->mdata);
+	configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
+	/* Invalidate mdata so destroy_func does not touch the session. */
+	qstate->mdata = INVALID_CACHE_MP_WRITE_SESSION;
+
+	qstate->kevent_watermark = 0;
+	qstate->process_func = NULL;
+	TRACE_OUT(on_mp_write_session_close_notification);
+	return (0);
+}
+
+/*
+ * Registers the decorated multipart cache entry in the global cache and
+ * links it to the query's configuration entry.  The configuration
+ * entry's entry_name is temporarily swapped for the decorated name so
+ * the shared mp_cache_params can be reused for registration, then
+ * restored.  Returns the (possibly INVALID_CACHE_ENTRY) cache entry.
+ */
+cache_entry
+register_new_mp_cache_entry(struct query_state *qstate,
+    const char *dec_cache_entry_name)
+{
+	cache_entry c_entry;
+	char *en_bkp;
+
+	TRACE_IN(register_new_mp_cache_entry);
+	c_entry = INVALID_CACHE_ENTRY;
+	configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
+
+	configuration_lock_wrlock(s_configuration);
+	en_bkp = qstate->config_entry->mp_cache_params.entry_name;
+	qstate->config_entry->mp_cache_params.entry_name =
+	    (char *)dec_cache_entry_name;
+	register_cache_entry(s_cache, (struct cache_entry_params *)
+	    &qstate->config_entry->mp_cache_params);
+	qstate->config_entry->mp_cache_params.entry_name = en_bkp;
+	configuration_unlock(s_configuration);
+
+	/* Re-look the entry up to get the handle just registered. */
+	configuration_lock_rdlock(s_configuration);
+	c_entry = find_cache_entry(s_cache,
+	    dec_cache_entry_name);
+	configuration_unlock(s_configuration);
+
+	configuration_entry_add_mp_cache_entry(qstate->config_entry,
+	    c_entry);
+
+	configuration_unlock_entry(qstate->config_entry,
+	    CELT_MULTIPART);
+
+	TRACE_OUT(register_new_mp_cache_entry);
+	return (c_entry);
+}
diff --git a/usr.sbin/nscd/mp_ws_query.h b/usr.sbin/nscd/mp_ws_query.h
new file mode 100644
index 0000000..ba77665
--- /dev/null
+++ b/usr.sbin/nscd/mp_ws_query.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_MP_WS_QUERY_H__
+#define __CACHED_MP_WS_QUERY_H__
+
+extern int on_mp_write_session_request_read1(struct query_state *);
+extern cache_entry register_new_mp_cache_entry(struct query_state *,
+ const char *);
+
+#endif
diff --git a/usr.sbin/nscd/nscd.8 b/usr.sbin/nscd/nscd.8
new file mode 100644
index 0000000..5a8693a
--- /dev/null
+++ b/usr.sbin/nscd/nscd.8
@@ -0,0 +1,148 @@
+.\" Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+
+.Dd October 20, 2005
+.Dt CACHED 8
+.Os
+.Sh NAME
+.Nm cached
+.Nd caching server daemon
+.Sh SYNOPSIS
+.Nm
+.Op Fl nst
+.Op Fl i Ar cachename
+.Op Fl I Ar cachename
+.Sh DESCRIPTION
+.Nm
+is the system caching daemon.
+It can cache almost all types of data and is basically intended to be used
+with
+.Pa nsswitch
+subsystem.
+The cache is per-user: each user can work only with the data that they
+cached themselves, and cannot poison the cache of other users.
+.Nm
+supports 2 types of caching:
+.Bl -tag -width Pair
+.It Sy Type
+.Sy Description
+.It Common caching
+Each cached element is a key+value pair.
+This type of caching supports policies, which are applied, when maximum
+number of cached elements is exceeded.
+3 policies are available:
+.Pa FIFO
+(first in - first out),
+.Pa LRU
+(least recently used) and
+.Pa LFU
+(least frequently used).
+This type of caching is used with
+.Fn getXXXbyname
+- like functions.
+.It Multipart caching
+Each cached element is part of a sequence of elements.
+This type of caching is intended to be used with
+.Fn getXXXent
+- like functions.
+.El
+.Pp
+.Nm
+is able not only to cache elements, but to perform the actual nsswitch
+lookups by itself. To enable this feature use
+.Pa perform-actual-lookups
+parameter in the
+.Xr cached.conf 5
+.Pp
+.Nm
+recognizes the following runtime options:
+.Bl -tag -width indent
+.It Fl n
+Do not daemonize.
+.Nm
+doesn't fork and doesn't disconnect itself from the terminal.
+.It Fl s
+Single-threaded mode.
+Forces using only one thread for all processing purposes (it overrides
+the
+.Pa threads
+parameter in the
+.Xr cached.conf 5
+file).
+.It Fl t
+Trace mode.
+All trace messages would be written to the stdout.
+This mode is usually used with
+.Fl n
+and
+.Fl s
+flags for debugging purposes.
+.It Fl i Ar cachename
+Invalidates personal cache. When specified,
+.Nm
+acts as the administration tool. It asks the already
+running
+.Nm
+to invalidate the specified part of the cache of the
+calling user. For example, you may want sometimes
+to invalidate your
+.Pa hosts
+cache. You can specify
+.Pa all
+as the
+.Pa cachename
+to invalidate your personal cache as a whole. You
+can't use this option for the cachename, for which
+the
+.Pa perform-actual-lookups
+option is enabled.
+.It Fl I Ar cachename
+Invalidates the cache for every user. When specified,
+.Nm
+acts as the administration tool. It asks the already
+running
+.Nm
+to invalidate the specified part of the cache for
+every user. You can specify
+.Pa all
+as the
+.Pa cachename
+to invalidate the whole cache. Only root can use this
+option.
+.El
+.Sh FILES
+.Xr cached.conf 5
+.Sh SEE ALSO
+.Xr cached.conf 5
+.Xr nsswitch.conf 5
+.Xr nsdispatch 3
+.Sh "AUTHORS"
+Michael Bushkov
+.Aq bushman@rsu.ru
+.Sh "BUGS"
+To report bugs or suggestions please mail me
+.Aq bushman@rsu.ru
diff --git a/usr.sbin/nscd/nscd.c b/usr.sbin/nscd/nscd.c
new file mode 100644
index 0000000..3219d22
--- /dev/null
+++ b/usr.sbin/nscd/nscd.c
@@ -0,0 +1,884 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/param.h>
+#include <sys/un.h>
+#include <assert.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libutil.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "agents/passwd.h"
+#include "agents/group.h"
+#include "agents/services.h"
+#include "cachedcli.h"
+#include "cachelib.h"
+#include "config.h"
+#include "debug.h"
+#include "log.h"
+#include "parser.h"
+#include "query.h"
+#include "singletons.h"
+
+#ifndef CONFIG_PATH
+#define CONFIG_PATH "/etc/cached.conf"
+#endif
+#define DEFAULT_CONFIG_PATH "cached.conf"
+
+#define MAX_SOCKET_IO_SIZE 4096
+
+struct processing_thread_args {
+ cache the_cache;
+ struct configuration *the_configuration;
+ struct runtime_env *the_runtime_env;
+};
+
+static void accept_connection(struct kevent *, struct runtime_env *,
+ struct configuration *);
+static void destroy_cache_(cache);
+static void destroy_runtime_env(struct runtime_env *);
+static cache init_cache_(struct configuration *);
+static struct runtime_env *init_runtime_env(struct configuration *);
+static void print_version_info(void);
+static void processing_loop(cache, struct runtime_env *,
+ struct configuration *);
+static void process_socket_event(struct kevent *, struct runtime_env *,
+ struct configuration *);
+static void process_timer_event(struct kevent *, struct runtime_env *,
+ struct configuration *);
+static void *processing_thread(void *);
+static void usage(void);
+
+void get_time_func(struct timeval *);
+
+/* Prints the daemon name/version banner to stdout on startup. */
+static void
+print_version_info(void)
+{
+	TRACE_IN(print_version_info);
+	printf("cached v0.2 (20 Oct 2005)\nwas developed during SoC 2005\n");
+	TRACE_OUT(print_version_info);
+}
+
+/*
+ * Prints the short usage summary to stderr and terminates with a
+ * non-zero exit status.  The option string matches the SYNOPSIS in
+ * the man page: -i and -I take a cachename argument and therefore
+ * must not be folded into the plain flag cluster.
+ */
+static void
+usage(void)
+{
+	fprintf(stderr,
+	    "usage: cached [-nstd] [-i cachename] [-I cachename]\n");
+	exit(1);
+}
+
+/*
+ * Creates the cache and registers the positive and negative common cache
+ * entries for every configuration entry.  Multipart entries are not
+ * registered here - they are created lazily during the queries.
+ */
+static cache
+init_cache_(struct configuration *config)
+{
+	struct cache_params params;
+	cache retval;
+
+	struct configuration_entry *config_entry;
+	size_t size, i;
+	int res;
+
+	TRACE_IN(init_cache_);
+
+	memset(&params, 0, sizeof(struct cache_params));
+	params.get_time_func = get_time_func;
+	retval = init_cache(&params);
+
+	size = configuration_get_entries_size(config);
+	for (i = 0; i < size; ++i) {
+		config_entry = configuration_get_entry(config, i);
+		/*
+		 * We should register common entries now - multipart entries
+		 * would be registered automatically during the queries.
+		 */
+		/*
+		 * NOTE(review): res is assigned but never checked; failed
+		 * registrations are only caught by the asserts below -
+		 * confirm this is intended.
+		 */
+		res = register_cache_entry(retval, (struct cache_entry_params *)
+		    &config_entry->positive_cache_params);
+		config_entry->positive_cache_entry = find_cache_entry(retval,
+		    config_entry->positive_cache_params.entry_name);
+		assert(config_entry->positive_cache_entry !=
+			INVALID_CACHE_ENTRY);
+
+		res = register_cache_entry(retval, (struct cache_entry_params *)
+		    &config_entry->negative_cache_params);
+		config_entry->negative_cache_entry = find_cache_entry(retval,
+		    config_entry->negative_cache_params.entry_name);
+		assert(config_entry->negative_cache_entry !=
+			INVALID_CACHE_ENTRY);
+	}
+
+	LOG_MSG_2("cache", "cache was successfully initialized");
+	TRACE_OUT(init_cache_);
+	return (retval);
+}
+
+/* Thin tracing wrapper around destroy_cache(). */
+static void
+destroy_cache_(cache the_cache)
+{
+	TRACE_IN(destroy_cache_);
+	destroy_cache(the_cache);
+	TRACE_OUT(destroy_cache_);
+}
+
+/*
+ * Socket and kqueues are prepared here. We have one global queue for both
+ * socket and timers events.  Returns the initialized runtime environment,
+ * or NULL on failure.
+ */
+static struct runtime_env *
+init_runtime_env(struct configuration *config)
+{
+	int serv_addr_len;
+	struct sockaddr_un serv_addr;
+
+	struct kevent eventlist;
+	struct timespec timeout;
+
+	struct runtime_env *retval;
+
+	TRACE_IN(init_runtime_env);
+	retval = (struct runtime_env *)malloc(sizeof(struct runtime_env));
+	assert(retval != NULL);
+	memset(retval, 0, sizeof(struct runtime_env));
+
+	retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
+	if (retval->sockfd == -1) {
+		/* previously a socket() failure was silently ignored */
+		free(retval);
+		LOG_ERR_2("runtime environment", "can't create socket: %s",
+		    strerror(errno));
+		TRACE_OUT(init_runtime_env);
+		return (NULL);
+	}
+
+	if (config->force_unlink == 1)
+		unlink(config->socket_path);
+
+	memset(&serv_addr, 0, sizeof(struct sockaddr_un));
+	serv_addr.sun_family = PF_LOCAL;
+	strncpy(serv_addr.sun_path, config->socket_path,
+	    sizeof(serv_addr.sun_path));
+	/* strncpy() does not NUL-terminate when the source is too long */
+	serv_addr.sun_path[sizeof(serv_addr.sun_path) - 1] = '\0';
+	serv_addr_len = sizeof(serv_addr.sun_family) +
+	    strlen(serv_addr.sun_path) + 1;
+
+	if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
+	    serv_addr_len) == -1) {
+		close(retval->sockfd);
+		free(retval);
+
+		LOG_ERR_2("runtime environment", "can't bind socket to path: "
+		    "%s", config->socket_path);
+		TRACE_OUT(init_runtime_env);
+		return (NULL);
+	}
+	LOG_MSG_2("runtime environment", "using socket %s",
+	    config->socket_path);
+
+	/*
+	 * Here we're marking socket as non-blocking and setting its backlog
+	 * to the maximum value
+	 */
+	chmod(config->socket_path, config->socket_mode);
+	listen(retval->sockfd, -1);
+	fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
+
+	retval->queue = kqueue();
+	assert(retval->queue != -1);
+
+	/* one-shot read filter on the listening socket; re-armed after accept */
+	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
+	    0, 0, 0);
+	memset(&timeout, 0, sizeof(struct timespec));
+	kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
+
+	LOG_MSG_2("runtime environment", "successfully initialized");
+	TRACE_OUT(init_runtime_env);
+	return (retval);
+}
+
+/* Closes the kqueue and the listening socket, then frees the env itself. */
+static void
+destroy_runtime_env(struct runtime_env *env)
+{
+	TRACE_IN(destroy_runtime_env);
+	close(env->queue);
+	close(env->sockfd);
+	free(env);
+	TRACE_OUT(destroy_runtime_env);
+}
+
+/*
+ * Accepts a pending connection on the listening socket, queries the
+ * peer's effective credentials, creates the per-connection query state
+ * and registers the read and timeout events for it in the kqueue.
+ */
+static void
+accept_connection(struct kevent *event_data, struct runtime_env *env,
+	struct configuration *config)
+{
+	struct kevent eventlist[2];
+	struct timespec timeout;
+	struct query_state *qstate;
+
+	int fd;
+	int res;
+
+	uid_t euid;
+	gid_t egid;
+
+	TRACE_IN(accept_connection);
+	fd = accept(event_data->ident, NULL, NULL);
+	if (fd == -1) {
+		LOG_ERR_2("accept_connection", "error %d during accept()",
+		    errno);
+		TRACE_OUT(accept_connection);
+		return;
+	}
+
+	if (getpeereid(fd, &euid, &egid) != 0) {
+		LOG_ERR_2("accept_connection", "error %d during getpeereid()",
+		    errno);
+		close(fd);	/* don't leak the descriptor on failure */
+		TRACE_OUT(accept_connection);
+		return;
+	}
+
+	qstate = init_query_state(fd, sizeof(int), euid, egid);
+	if (qstate == NULL) {
+		LOG_ERR_2("accept_connection", "can't init query_state");
+		close(fd);	/* don't leak the descriptor on failure */
+		TRACE_OUT(accept_connection);
+		return;
+	}
+
+	/* qstate rides along as udata on both the timer and read events */
+	memset(&timeout, 0, sizeof(struct timespec));
+	EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
+	    0, qstate->timeout.tv_sec * 1000, qstate);
+	EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
+	    NOTE_LOWAT, qstate->kevent_watermark, qstate);
+	res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
+	if (res < 0)
+		LOG_ERR_2("accept_connection", "kevent error");
+
+	TRACE_OUT(accept_connection);
+}
+
+/*
+ * Handles a read/write readiness event on a client socket: deletes the
+ * pending per-connection timer, advances the query's state machine
+ * (possibly through the alternate large-buffer IO path), then either
+ * tears the connection down or re-registers the filter and timer events.
+ */
+static void
+process_socket_event(struct kevent *event_data, struct runtime_env *env,
+	struct configuration *config)
+{
+	struct kevent eventlist[2];
+	struct timeval query_timeout;
+	struct timespec kevent_timeout;
+	int nevents;
+	int eof_res, res;
+	ssize_t io_res;
+	struct query_state *qstate;
+
+	TRACE_IN(process_socket_event);
+	eof_res = event_data->flags & EV_EOF ? 1 : 0;
+	res = 0;
+
+	/* cancel the timeout timer while this event is being serviced */
+	memset(&kevent_timeout, 0, sizeof(struct timespec));
+	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
+	    0, 0, NULL);
+	nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
+	if (nevents == -1) {
+		if (errno == ENOENT) {
+			/* the timer is already handling this event */
+			TRACE_OUT(process_socket_event);
+			return;
+		} else {
+			/* some other error happened */
+			LOG_ERR_2("process_socket_event", "kevent error, errno"
+			    " is %d", errno);
+			TRACE_OUT(process_socket_event);
+			return;
+		}
+	}
+	qstate = (struct query_state *)event_data->udata;
+
+	/*
+	 * If the buffer that is to be sent/received is too large,
+	 * we send it implicitly, by using query_io_buffer_read and
+	 * query_io_buffer_write functions in the query_state. These functions
+	 * use the temporary buffer, which is later sent/received in parts.
+	 * The code below implements buffer splitting/merging for send/receive
+	 * operations. It also does the actual socket IO operations.
+	 */
+	if (((qstate->use_alternate_io == 0) &&
+	    (qstate->kevent_watermark <= event_data->data)) ||
+	    ((qstate->use_alternate_io != 0) &&
+	    (qstate->io_buffer_watermark <= event_data->data))) {
+		if (qstate->use_alternate_io != 0) {
+			switch (qstate->io_buffer_filter) {
+			case EVFILT_READ:
+				io_res = query_socket_read(qstate,
+				    qstate->io_buffer_p,
+				    qstate->io_buffer_watermark);
+				if (io_res < 0) {
+					qstate->use_alternate_io = 0;
+					qstate->process_func = NULL;
+				} else {
+					qstate->io_buffer_p += io_res;
+					/* whole buffer received - leave alternate IO */
+					if (qstate->io_buffer_p ==
+					    qstate->io_buffer +
+					    qstate->io_buffer_size) {
+						qstate->io_buffer_p =
+						    qstate->io_buffer;
+						qstate->use_alternate_io = 0;
+					}
+				}
+				break;
+			default:
+				break;
+			}
+		}
+
+		if (qstate->use_alternate_io == 0) {
+			/*
+			 * Run the query state machine until it blocks
+			 * waiting for more IO, finishes, or fails.
+			 */
+			do {
+				res = qstate->process_func(qstate);
+			} while ((qstate->kevent_watermark == 0) &&
+			    (qstate->process_func != NULL) &&
+			    (res == 0));
+
+			if (res != 0)
+				qstate->process_func = NULL;
+		}
+
+		if ((qstate->use_alternate_io != 0) &&
+		    (qstate->io_buffer_filter == EVFILT_WRITE)) {
+			io_res = query_socket_write(qstate, qstate->io_buffer_p,
+			    qstate->io_buffer_watermark);
+			if (io_res < 0) {
+				qstate->use_alternate_io = 0;
+				qstate->process_func = NULL;
+			} else
+				qstate->io_buffer_p += io_res;
+		}
+	} else {
+		/* assuming that socket was closed */
+		qstate->process_func = NULL;
+		qstate->use_alternate_io = 0;
+	}
+
+	/* the query is finished, failed, or the peer hung up - tear down */
+	if (((qstate->process_func == NULL) &&
+	    (qstate->use_alternate_io == 0)) ||
+	    (eof_res != 0) || (res != 0)) {
+		destroy_query_state(qstate);
+		close(event_data->ident);
+		TRACE_OUT(process_socket_event);
+		return;
+	}
+
+	/* updating the query_state lifetime variable */
+	get_time_func(&query_timeout);
+	query_timeout.tv_usec = 0;
+	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
+	if (query_timeout.tv_sec > qstate->timeout.tv_sec)
+		query_timeout.tv_sec = 0;
+	else
+		query_timeout.tv_sec = qstate->timeout.tv_sec -
+		    query_timeout.tv_sec;
+
+	if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
+	    qstate->io_buffer + qstate->io_buffer_size))
+		qstate->use_alternate_io = 0;
+
+	if (qstate->use_alternate_io == 0) {
+		/*
+		 * If we must send/receive the large block of data,
+		 * we should prepare the query_state's io_XXX fields.
+		 * We should also substitute its write_func and read_func
+		 * with the query_io_buffer_write and query_io_buffer_read,
+		 * which will allow us to implicitly send/receive this large
+		 * buffer later (in the subsequent calls to the
+		 * process_socket_event).
+		 */
+		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
+			if (qstate->io_buffer != NULL)
+				free(qstate->io_buffer);
+
+			qstate->io_buffer = (char *)malloc(
+			    qstate->kevent_watermark);
+			assert(qstate->io_buffer != NULL);
+			memset(qstate->io_buffer, 0, qstate->kevent_watermark);
+
+			qstate->io_buffer_p = qstate->io_buffer;
+			qstate->io_buffer_size = qstate->kevent_watermark;
+			qstate->io_buffer_filter = qstate->kevent_filter;
+
+			qstate->write_func = query_io_buffer_write;
+			qstate->read_func = query_io_buffer_read;
+
+			if (qstate->kevent_filter == EVFILT_READ)
+				qstate->use_alternate_io = 1;
+
+			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
+			EV_SET(&eventlist[1], event_data->ident,
+			    qstate->kevent_filter, EV_ADD | EV_ONESHOT,
+			    NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
+		} else {
+			EV_SET(&eventlist[1], event_data->ident,
+			    qstate->kevent_filter, EV_ADD | EV_ONESHOT,
+			    NOTE_LOWAT, qstate->kevent_watermark, qstate);
+		}
+	} else {
+		/* alternate IO in progress - wait for the remaining chunk */
+		if (qstate->io_buffer + qstate->io_buffer_size -
+		    qstate->io_buffer_p <
+		    MAX_SOCKET_IO_SIZE) {
+			qstate->io_buffer_watermark = qstate->io_buffer +
+			    qstate->io_buffer_size - qstate->io_buffer_p;
+			EV_SET(&eventlist[1], event_data->ident,
+			    qstate->io_buffer_filter,
+			    EV_ADD | EV_ONESHOT, NOTE_LOWAT,
+			    qstate->io_buffer_watermark,
+			    qstate);
+		} else {
+			qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
+			EV_SET(&eventlist[1], event_data->ident,
+			    qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
+			    NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
+		}
+	}
+	/* re-arm the per-connection timeout with the remaining lifetime */
+	EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
+	    EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
+	kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
+
+	TRACE_OUT(process_socket_event);
+}
+
+/*
+ * This routine is called if timer event has been signaled in the kqueue. It
+ * just closes the socket and destroys the query_state.
+ */
+static void
+process_timer_event(struct kevent *event_data, struct runtime_env *env,
+	struct configuration *config)
+{
+	struct query_state *qstate;
+
+	TRACE_IN(process_timer_event);
+	/* the query state was attached to the timer event as udata */
+	qstate = (struct query_state *)event_data->udata;
+	destroy_query_state(qstate);
+	close(event_data->ident);
+	TRACE_OUT(process_timer_event);
+}
+
+/*
+ * Processing loop is the basic processing routine, that forms a body of each
+ * processing thread
+ */
+static void
+processing_loop(cache the_cache, struct runtime_env *env,
+	struct configuration *config)
+{
+	struct timespec timeout;
+	const int eventlist_size = 1;
+	struct kevent eventlist[eventlist_size];
+	int nevents, i;
+
+	TRACE_MSG("=> processing_loop");
+	memset(&timeout, 0, sizeof(struct timespec));
+	memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
+
+	for (;;) {
+		/* block indefinitely until the next event arrives */
+		nevents = kevent(env->queue, NULL, 0, eventlist,
+		    eventlist_size, NULL);
+		/*
+		 * we can only receive 1 event on success
+		 */
+		if (nevents == 1) {
+			struct kevent *event_data;
+			event_data = &eventlist[0];
+
+			if (event_data->ident == env->sockfd) {
+				/*
+				 * The listening socket is ready: accept all
+				 * pending connections, then re-arm the
+				 * EV_ONESHOT read filter.
+				 * NOTE(review): the code below uses
+				 * s_runtime_env while the rest of the
+				 * function uses env - presumably they are
+				 * always the same object; confirm.
+				 */
+				for (i = 0; i < event_data->data; ++i)
+				    accept_connection(event_data, env, config);
+
+				EV_SET(eventlist, s_runtime_env->sockfd,
+				    EVFILT_READ, EV_ADD | EV_ONESHOT,
+				    0, 0, 0);
+				memset(&timeout, 0,
+				    sizeof(struct timespec));
+				kevent(s_runtime_env->queue, eventlist,
+				    1, NULL, 0, &timeout);
+
+			} else {
+				switch (event_data->filter) {
+				case EVFILT_READ:
+				case EVFILT_WRITE:
+					process_socket_event(event_data,
+					    env, config);
+					break;
+				case EVFILT_TIMER:
+					process_timer_event(event_data,
+					    env, config);
+					break;
+				default:
+					break;
+				}
+			}
+		} else {
+			/* this branch shouldn't be currently executed */
+		}
+	}
+
+	/* NOTE(review): unreachable - the loop above never terminates */
+	TRACE_MSG("<= processing_loop");
+}
+
+/*
+ * Wrapper above the processing loop function. It sets the thread signal mask
+ * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
+ */
+static void *
+processing_thread(void *data)
+{
+	struct processing_thread_args *args;
+	sigset_t new;
+
+	TRACE_MSG("=> processing_thread");
+	args = (struct processing_thread_args *)data;
+
+	/* a misbehaving client could otherwise kill the thread with SIGPIPE */
+	sigemptyset(&new);
+	sigaddset(&new, SIGPIPE);
+	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
+		LOG_ERR_1("processing thread",
+		    "thread can't block the SIGPIPE signal");
+
+	/* args were heap-allocated by main(); this thread owns and frees them */
+	processing_loop(args->the_cache, args->the_runtime_env,
+	    args->the_configuration);
+	free(args);
+	TRACE_MSG("<= processing_thread");
+
+	return (NULL);
+}
+
+/*
+ * Time source used by the cache (see init_cache_()): returns the
+ * monotonic clock reading; only whole seconds are used, tv_usec is
+ * always zeroed.
+ */
+void
+get_time_func(struct timeval *time)
+{
+	struct timespec res;
+	memset(&res, 0, sizeof(struct timespec));
+	clock_gettime(CLOCK_MONOTONIC, &res);
+
+	time->tv_sec = res.tv_sec;
+	time->tv_usec = 0;
+}
+
+/*
+ * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
+ * search for this symbol in the executable. This symbol is the attribute of
+ * the caching daemon. So, if it exists, nsdispatch won't try to connect to
+ * the caching daemon and will just ignore the 'cache' source in the
+ * nsswitch.conf. This method helps to avoid cycles and organize
+ * self-performing requests.
+ */
+void
+_nss_cache_cycle_prevention_function(void)
+{
+	/* intentionally empty - only the presence of the symbol matters */
+}
+
+/*
+ * Daemon entry point.  Parses the command line, then either performs an
+ * administrative (interactive) request against an already running daemon,
+ * or initializes the configuration, cache and runtime environment and
+ * starts the processing thread(s).
+ */
+int
+main(int argc, char *argv[])
+{
+	struct processing_thread_args *thread_args;
+	pthread_t *threads;
+
+	struct pidfh *pidfile;
+	pid_t pid;
+
+	char const *config_file;
+	char const *error_str;
+	int error_line;
+	int i, res;
+
+	int trace_mode_enabled;
+	int force_single_threaded;
+	int do_not_daemonize;
+	int clear_user_cache_entries, clear_all_cache_entries;
+	char *user_config_entry_name, *global_config_entry_name;
+	int show_statistics;
+	int daemon_mode, interactive_mode;
+
+
+	/* by default all debug messages are omitted */
+	TRACE_OFF();
+
+	/* startup output */
+	print_version_info();
+
+	/* parsing command line arguments */
+	trace_mode_enabled = 0;
+	force_single_threaded = 0;
+	do_not_daemonize = 0;
+	clear_user_cache_entries = 0;
+	clear_all_cache_entries = 0;
+	show_statistics = 0;
+	user_config_entry_name = NULL;
+	global_config_entry_name = NULL;
+	while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
+		switch (res) {
+		case 'n':
+			do_not_daemonize = 1;
+			break;
+		case 's':
+			force_single_threaded = 1;
+			break;
+		case 't':
+			trace_mode_enabled = 1;
+			break;
+		case 'i':
+			clear_user_cache_entries = 1;
+			if (optarg != NULL)
+				if (strcmp(optarg, "all") != 0)
+					user_config_entry_name = strdup(optarg);
+			break;
+		case 'I':
+			clear_all_cache_entries = 1;
+			if (optarg != NULL)
+				if (strcmp(optarg, "all") != 0)
+					global_config_entry_name =
+					    strdup(optarg);
+			break;
+		case 'd':
+			show_statistics = 1;
+			break;
+		case '?':
+		default:
+			usage();
+			/* NOT REACHED */
+		}
+	}
+
+	/* daemon-only and administration-only options are mutually exclusive */
+	daemon_mode = do_not_daemonize | force_single_threaded |
+	    trace_mode_enabled;
+	interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
+	    show_statistics;
+
+	if ((daemon_mode != 0) && (interactive_mode != 0)) {
+		LOG_ERR_1("main", "daemon mode and interactive_mode arguments "
+		    "can't be used together");
+		usage();
+	}
+
+	if (interactive_mode != 0) {
+		FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
+		char pidbuf[256];
+
+		struct cached_connection_params connection_params;
+		cached_connection connection;
+
+		int result;
+
+		if (pidfin == NULL)
+			errx(EXIT_FAILURE, "There is no daemon running.");
+
+		memset(pidbuf, 0, sizeof(pidbuf));
+		/*
+		 * Read with an element size of 1 so that a normal, short
+		 * pidfile still counts as a successful read (with size
+		 * and nmemb swapped, a pidfile smaller than the buffer
+		 * yields zero complete items).
+		 */
+		fread(pidbuf, 1, sizeof(pidbuf) - 1, pidfin);
+		/* the stream must be checked before it is closed */
+		res = ferror(pidfin);
+		fclose(pidfin);
+		if (res != 0)
+			errx(EXIT_FAILURE, "Can't read from pidfile.");
+
+		if (sscanf(pidbuf, "%d", &pid) != 1)
+			errx(EXIT_FAILURE, "Invalid pidfile.");
+		LOG_MSG_1("main", "daemon PID is %d", pid);
+
+
+		memset(&connection_params, 0,
+		    sizeof(struct cached_connection_params));
+		connection_params.socket_path = DEFAULT_SOCKET_PATH;
+		connection = open_cached_connection__(&connection_params);
+		if (connection == INVALID_CACHED_CONNECTION)
+			errx(EXIT_FAILURE, "Can't connect to the daemon.");
+
+		if (clear_user_cache_entries != 0) {
+			result = cached_transform__(connection,
+			    user_config_entry_name, TT_USER);
+			if (result != 0)
+				LOG_MSG_1("main",
+				    "user cache transformation failed");
+			else
+				LOG_MSG_1("main",
+				    "user cache_transformation "
+				    "succeeded");
+		}
+
+		if (clear_all_cache_entries != 0) {
+			if (geteuid() != 0)
+				errx(EXIT_FAILURE, "Only root can initiate "
+				    "global cache transformation.");
+
+			result = cached_transform__(connection,
+			    global_config_entry_name, TT_ALL);
+			if (result != 0)
+				LOG_MSG_1("main",
+				    "global cache transformation "
+				    "failed");
+			else
+				LOG_MSG_1("main",
+				    "global cache transformation "
+				    "succeeded");
+		}
+
+		close_cached_connection__(connection);
+
+		free(user_config_entry_name);
+		free(global_config_entry_name);
+		return (EXIT_SUCCESS);
+	}
+
+	pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
+	if (pidfile == NULL) {
+		if (errno == EEXIST)
+			errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
+			    pid);
+		warn("Cannot open or create pidfile");
+	}
+
+	if (trace_mode_enabled == 1)
+		TRACE_ON();
+
+	/* blocking the main thread from receiving SIGPIPE signal */
+	sigblock(sigmask(SIGPIPE));
+
+	/* daemonization */
+	if (do_not_daemonize == 0) {
+		res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
+		if (res != 0) {
+			LOG_ERR_1("main", "can't daemonize myself: %s",
+			    strerror(errno));
+			pidfile_remove(pidfile);
+			pidfile = NULL;	/* already removed - don't remove twice */
+			goto fin;
+		} else
+			LOG_MSG_1("main", "successfully daemonized");
+	}
+
+	pidfile_write(pidfile);
+
+	s_agent_table = init_agent_table();
+	register_agent(s_agent_table, init_passwd_agent());
+	register_agent(s_agent_table, init_passwd_mp_agent());
+	register_agent(s_agent_table, init_group_agent());
+	register_agent(s_agent_table, init_group_mp_agent());
+	register_agent(s_agent_table, init_services_agent());
+	register_agent(s_agent_table, init_services_mp_agent());
+	LOG_MSG_1("main", "request agents registered successfully");
+
+	/*
+	 * Hosts agent can't work properly until we have access to the
+	 * appropriate dtab structures, which are used in nsdispatch
+	 * calls
+	 *
+	 register_agent(s_agent_table, init_hosts_agent());
+	*/
+
+	/* configuration initialization */
+	s_configuration = init_configuration();
+	fill_configuration_defaults(s_configuration);
+
+	error_str = NULL;
+	error_line = 0;
+	config_file = CONFIG_PATH;
+
+	res = parse_config_file(s_configuration, config_file, &error_str,
+	    &error_line);
+	if ((res != 0) && (error_str == NULL)) {
+		/* the system-wide file is missing - try the local fallback */
+		config_file = DEFAULT_CONFIG_PATH;
+		res = parse_config_file(s_configuration, config_file,
+		    &error_str, &error_line);
+	}
+
+	if (res != 0) {
+		if (error_str != NULL) {
+			LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
+			    config_file, error_line, error_str);
+		} else {
+			LOG_ERR_1("main", "no configuration file found "
+			    "- was looking for %s and %s",
+			    CONFIG_PATH, DEFAULT_CONFIG_PATH);
+		}
+		destroy_configuration(s_configuration);
+		if (pidfile != NULL)
+			pidfile_remove(pidfile);	/* don't leave a stale pidfile */
+		return (-1);
+	}
+
+	if (force_single_threaded == 1)
+		s_configuration->threads_num = 1;
+
+	/* cache initialization */
+	s_cache = init_cache_(s_configuration);
+	if (s_cache == NULL) {
+		LOG_ERR_1("main", "can't initialize the cache");
+		destroy_configuration(s_configuration);
+		if (pidfile != NULL)
+			pidfile_remove(pidfile);	/* don't leave a stale pidfile */
+		return (-1);
+	}
+
+	/* runtime environment initialization */
+	s_runtime_env = init_runtime_env(s_configuration);
+	if (s_runtime_env == NULL) {
+		LOG_ERR_1("main", "can't initialize the runtime environment");
+		destroy_configuration(s_configuration);
+		destroy_cache_(s_cache);
+		if (pidfile != NULL)
+			pidfile_remove(pidfile);	/* don't leave a stale pidfile */
+		return (-1);
+	}
+
+	if (s_configuration->threads_num > 1) {
+		threads = (pthread_t *)malloc(sizeof(pthread_t) *
+		    s_configuration->threads_num);
+		assert(threads != NULL);
+		memset(threads, 0, sizeof(pthread_t) *
+		    s_configuration->threads_num);
+		for (i = 0; i < s_configuration->threads_num; ++i) {
+			/* each thread owns its args and frees them itself */
+			thread_args = (struct processing_thread_args *)malloc(
+			    sizeof(struct processing_thread_args));
+			assert(thread_args != NULL);
+			thread_args->the_cache = s_cache;
+			thread_args->the_runtime_env = s_runtime_env;
+			thread_args->the_configuration = s_configuration;
+
+			LOG_MSG_1("main", "thread #%d was successfully created",
+			    i);
+			pthread_create(&threads[i], NULL, processing_thread,
+			    thread_args);
+
+			thread_args = NULL;
+		}
+
+		for (i = 0; i < s_configuration->threads_num; ++i)
+			pthread_join(threads[i], NULL);
+		free(threads);
+	} else {
+		LOG_MSG_1("main", "working in single-threaded mode");
+		processing_loop(s_cache, s_runtime_env, s_configuration);
+	}
+
+fin:
+	/*
+	 * This label is also reached via "goto fin" on daemonization
+	 * failure, before the singletons are initialized - guard against
+	 * destroying objects that were never created.
+	 */
+	/* runtime environment destruction */
+	if (s_runtime_env != NULL)
+		destroy_runtime_env(s_runtime_env);
+
+	/* cache destruction */
+	if (s_cache != NULL)
+		destroy_cache_(s_cache);
+
+	/* configuration destruction */
+	if (s_configuration != NULL)
+		destroy_configuration(s_configuration);
+
+	/* agents table destruction */
+	if (s_agent_table != NULL)
+		destroy_agent_table(s_agent_table);
+
+	if (pidfile != NULL)
+		pidfile_remove(pidfile);
+	return (EXIT_SUCCESS);
+}
diff --git a/usr.sbin/nscd/nscd.conf.5 b/usr.sbin/nscd/nscd.conf.5
new file mode 100644
index 0000000..de6d43a
--- /dev/null
+++ b/usr.sbin/nscd/nscd.conf.5
@@ -0,0 +1,102 @@
+.\" Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+
+.Dd August 29, 2005
+.Dt CACHED.CONF 5
+.Os
+.Sh NAME
+.Nm cached.conf
+.Nd "caching daemon configuration file manual page"
+.Sh "DESCRIPTION"
+.Nm
+is used by the
+.Xr cached 8
+daemon and is read on its startup. Its syntax is mostly similar to the
+nscd.conf syntax in Linux and Solaris. It has some differences, though -
+see them below.
+.Pp
+Each line specifies either an attribute and a value, or an attribute,
+a cachename and a value. Usual cachenames are passwd, groups, hosts,
+services, protocols and rpc. You can also use any other cachename
+(for example, if some third-party application uses nsswitch).
+.Pp
+.Bl -tag -width Pair
+.It Sy threads [value]
+Number of threads that listen for connections and process requests. The
+minimum is 1. The default value is 8.
+.It Sy enable-cache [cachename] [yes|no]
+Enables or disables the cache for specified cachename.
+.It Sy positive-time-to-live [cachename] [value]
+Sets the TTL (time-to-live) for the specified cache in seconds. Larger values
+can increase system's performance, but they also can affect the cache
+coherence. The default value is 3600.
+.It Sy positive-policy [cachename] [fifo|lru|lfu]
+The policy that is applied to erase some of the cache elements, when the
+size limit of the given cachename is exceeded. Possible policies are:
+fifo (first-in-first-out), lru (least-recently-used), and
+lfu (least-frequently-used). The default policy is lru.
+.It Sy negative-time-to-live [cachename] [value]
+The TTL of the negative cached elements in seconds. The larger values can
+significantly increase system performance in some environments
+(when dealing with files with UIDs, which are not in system databases,
+for example). This number should be kept low to avoid the cache
+coherence problems. The default value is 60.
+.It Sy negative-policy [cachename] [fifo|lru|lfu]
+The same as the positive-policy, but this one is applied to the negative
+elements of the given cachename. The default policy is fifo.
+.It Sy suggested-size [cachename] [value]
+This is the internal hash table size. The value should be a prime number
+for optimum performance. You should only change this value when the number
+of cached elements is significantly (5-10 times) greater than the default
+hash table size (255).
+.It Sy keep-hot-count [cachename] [value]
+The size limit of the cache with the given cachename. When it is exceeded,
+the policy will be applied. The default value is 2048.
+.It Sy perform-actual-lookups [cachename] [yes|no]
+If enabled, the
+.Xr cached 8
+doesn't simply receive and cache the NSS-requests results, but performs
+all the lookups by itself and only returns the responses. If this feature is
+enabled, then for the given cachename
+.Xr cached 8
+will act similarly to the NSCD.
+.Pp
+.Pa NOTE:
+this feature is currently experimental - it supports only passwd, groups and
+services cachenames.
+.Sh "NOTES"
+You can use
+.Sq #
+symbol at the beginning of the line for comments.
+.Sh "SEE ALSO"
+.Xr cached 8
+.Sh "AUTHORS"
+Michael Bushkov
+.Aq bushman@rsu.ru
+.Sh "BUGS"
+To report bugs or suggestions please mail me
+.Aq bushman@rsu.ru
diff --git a/usr.sbin/nscd/nscdcli.c b/usr.sbin/nscd/nscdcli.c
new file mode 100644
index 0000000..c78c875
--- /dev/null
+++ b/usr.sbin/nscd/nscdcli.c
@@ -0,0 +1,284 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/event.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "debug.h"
+#include "cachedcli.h"
+#include "protocol.h"
+
+#define DEFAULT_CACHED_IO_TIMEOUT 4
+
+static int safe_write(struct cached_connection_ *, const void *, size_t);
+static int safe_read(struct cached_connection_ *, void *, size_t);
+static int send_credentials(struct cached_connection_ *, int);
+
/*
 * Writes data_size bytes from data to the non-blocking connection socket.
 * Uses the connection's write kqueue to wait, up to
 * DEFAULT_CACHED_IO_TIMEOUT seconds, for the socket to become writable
 * between partial writes, and never writes more than the kernel reports
 * it can accept (eventlist.data).
 * Returns 0 on success, -1 on timeout, premature EOF or write error.
 */
static int
safe_write(struct cached_connection_ *connection, const void *data,
	size_t data_size)
{
	struct kevent eventlist;
	int nevents;
	const char *ptr;	/* byte cursor: ISO C forbids void * arithmetic */
	size_t result;
	ssize_t s_result;
	struct timespec timeout;

	if (data_size == 0)
		return (0);

	ptr = data;
	timeout.tv_sec = DEFAULT_CACHED_IO_TIMEOUT;
	timeout.tv_nsec = 0;
	result = 0;
	do {
		nevents = kevent(connection->write_queue, NULL, 0, &eventlist,
		    1, &timeout);
		if ((nevents == 1) && (eventlist.filter == EVFILT_WRITE)) {
			s_result = write(connection->sockfd, ptr + result,
			    eventlist.data < data_size - result ?
			    eventlist.data : data_size - result);
			if (s_result == -1)
				return (-1);
			else
				result += s_result;

			/* peer closed: only a success if everything was sent */
			if (eventlist.flags & EV_EOF)
				return (result < data_size ? -1 : 0);
		} else
			return (-1);
	} while (result < data_size);

	return (0);
}
+
/*
 * Reads data_size bytes from the non-blocking connection socket into data.
 * Mirror image of safe_write(): waits on the read kqueue with a
 * DEFAULT_CACHED_IO_TIMEOUT timeout between partial reads and bounds each
 * read by the number of bytes the kernel reports as available.
 * Returns 0 on success, -1 on timeout, premature EOF or read error.
 */
static int
safe_read(struct cached_connection_ *connection, void *data, size_t data_size)
{
	struct kevent eventlist;
	char *ptr;		/* byte cursor: ISO C forbids void * arithmetic */
	size_t result;
	ssize_t s_result;
	struct timespec timeout;
	int nevents;

	if (data_size == 0)
		return (0);

	ptr = data;
	timeout.tv_sec = DEFAULT_CACHED_IO_TIMEOUT;
	timeout.tv_nsec = 0;
	result = 0;
	do {
		nevents = kevent(connection->read_queue, NULL, 0, &eventlist, 1,
		    &timeout);
		if ((nevents == 1) && (eventlist.filter == EVFILT_READ)) {
			s_result = read(connection->sockfd, ptr + result,
			    eventlist.data <= data_size - result ? eventlist.data :
			    data_size - result);
			if (s_result == -1)
				return (-1);
			else
				result += s_result;

			/* peer closed: only a success if everything arrived */
			if (eventlist.flags & EV_EOF)
				return (result < data_size ? -1 : 0);
		} else
			return (-1);
	} while (result < data_size);

	return (0);
}
+
/*
 * Sends the request type over the connection socket together with the
 * caller's credentials, delivered as an SCM_CREDS control message (the
 * kernel fills in the cmsgcred structure on our behalf).  Waits on the
 * connection's write kqueue until at least sizeof(int) bytes can be
 * written before attempting sendmsg().
 * Returns 0 on success, -1 on failure.
 */
static int
send_credentials(struct cached_connection_ *connection, int type)
{
	struct kevent eventlist;
	int nevents;
	ssize_t result;
	int res;

	struct msghdr cred_hdr;
	struct iovec iov;

	/* control-message buffer: header followed by the credentials */
	struct {
		struct cmsghdr hdr;
		struct cmsgcred creds;
	} cmsg;

	TRACE_IN(send_credentials);
	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.hdr.cmsg_len = sizeof(cmsg);
	cmsg.hdr.cmsg_level = SOL_SOCKET;
	cmsg.hdr.cmsg_type = SCM_CREDS;

	memset(&cred_hdr, 0, sizeof(struct msghdr));
	cred_hdr.msg_iov = &iov;
	cred_hdr.msg_iovlen = 1;
	cred_hdr.msg_control = &cmsg;
	cred_hdr.msg_controllen = sizeof(cmsg);

	/* the request type is the only payload */
	iov.iov_base = &type;
	iov.iov_len = sizeof(int);

	/* raise the write low-watermark so we wake only when the int fits */
	EV_SET(&eventlist, connection->sockfd, EVFILT_WRITE, EV_ADD,
	    NOTE_LOWAT, sizeof(int), NULL);
	res = kevent(connection->write_queue, &eventlist, 1, NULL, 0, NULL);
	/* NOTE(review): res is not checked here — confirm failure is benign */

	nevents = kevent(connection->write_queue, NULL, 0, &eventlist, 1, NULL);
	if ((nevents == 1) && (eventlist.filter == EVFILT_WRITE)) {
		result = (sendmsg(connection->sockfd, &cred_hdr, 0) == -1) ? -1
		    : 0;
		/* restore the default (no low-watermark) write filter */
		EV_SET(&eventlist, connection->sockfd, EVFILT_WRITE, EV_ADD,
		    0, 0, NULL);
		kevent(connection->write_queue, &eventlist, 1, NULL, 0, NULL);
		TRACE_OUT(send_credentials);
		return (result);
	} else {
		TRACE_OUT(send_credentials);
		return (-1);
	}
}
+
/*
 * Opens a connection to the caching daemon listening on the unix-domain
 * socket named in params->socket_path.  Allocates the connection object
 * and the two kqueues used by safe_read()/safe_write(), and puts the
 * socket into non-blocking mode.
 * Returns the new connection, or NULL if the socket can't be created or
 * connected.  The caller owns the result and must release it with
 * close_cached_connection__().
 */
struct cached_connection_ *
open_cached_connection__(struct cached_connection_params const *params)
{
	struct cached_connection_ *retval;
	struct kevent eventlist;
	struct sockaddr_un client_address;
	int client_address_len, client_socket;
	int res;

	TRACE_IN(open_cached_connection);
	assert(params != NULL);

	client_socket = socket(PF_LOCAL, SOCK_STREAM, 0);
	if (client_socket == -1) {
		/* original code did not check this and would connect(-1) */
		TRACE_OUT(open_cached_connection);
		return (NULL);
	}
	memset(&client_address, 0, sizeof(client_address));
	client_address.sun_family = PF_LOCAL;
	/*
	 * Leave room for the terminating NUL: strncpy() does not guarantee
	 * termination, and an unterminated sun_path would make the strlen()
	 * below read out of bounds.
	 */
	strncpy(client_address.sun_path, params->socket_path,
		sizeof(client_address.sun_path) - 1);
	client_address_len = sizeof(client_address.sun_family) +
		strlen(client_address.sun_path) + 1;

	res = connect(client_socket, (struct sockaddr *)&client_address,
		client_address_len);
	if (res == -1) {
		close(client_socket);
		TRACE_OUT(open_cached_connection);
		return (NULL);
	}
	fcntl(client_socket, F_SETFL, O_NONBLOCK);

	retval = malloc(sizeof(struct cached_connection_));
	assert(retval != NULL);
	memset(retval, 0, sizeof(struct cached_connection_));

	retval->sockfd = client_socket;

	retval->write_queue = kqueue();
	assert(retval->write_queue != -1);

	EV_SET(&eventlist, retval->sockfd, EVFILT_WRITE, EV_ADD,
		0, 0, NULL);
	res = kevent(retval->write_queue, &eventlist, 1, NULL, 0, NULL);

	retval->read_queue = kqueue();
	assert(retval->read_queue != -1);

	EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD,
		0, 0, NULL);
	res = kevent(retval->read_queue, &eventlist, 1, NULL, 0, NULL);

	TRACE_OUT(open_cached_connection);
	return (retval);
}
+
/*
 * Closes the connection to the caching daemon and releases everything
 * associated with it: the socket, both kqueues and the connection
 * structure itself.  The pointer must not be used after this call.
 */
void
close_cached_connection__(struct cached_connection_ *connection)
{

	TRACE_IN(close_cached_connection);
	assert(connection != NULL);

	close(connection->sockfd);
	close(connection->read_queue);
	close(connection->write_queue);
	free(connection);
	TRACE_OUT(close_cached_connection);
}
+
+int
+cached_transform__(struct cached_connection_ *connection,
+ const char *entry_name, int transformation_type)
+{
+ size_t name_size;
+ int error_code;
+ int result;
+
+ TRACE_IN(cached_transform);
+
+ error_code = -1;
+ result = 0;
+ result = send_credentials(connection, CET_TRANSFORM_REQUEST);
+ if (result != 0)
+ goto fin;
+
+ if (entry_name != NULL)
+ name_size = strlen(entry_name);
+ else
+ name_size = 0;
+
+ result = safe_write(connection, &name_size, sizeof(size_t));
+ if (result != 0)
+ goto fin;
+
+ result = safe_write(connection, &transformation_type, sizeof(int));
+ if (result != 0)
+ goto fin;
+
+ if (entry_name != NULL) {
+ result = safe_write(connection, entry_name, name_size);
+ if (result != 0)
+ goto fin;
+ }
+
+ result = safe_read(connection, &error_code, sizeof(int));
+ if (result != 0)
+ error_code = -1;
+
+fin:
+ TRACE_OUT(cached_transform);
+ return (error_code);
+}
diff --git a/usr.sbin/nscd/nscdcli.h b/usr.sbin/nscd/nscdcli.h
new file mode 100644
index 0000000..58d9ccc
--- /dev/null
+++ b/usr.sbin/nscd/nscdcli.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_CACHEDCLI_H__
+#define __CACHED_CACHEDCLI_H__
+
/* parameters used when establishing a connection to the caching daemon */
struct cached_connection_params {
	char *socket_path;	/* path of the daemon's unix-domain socket */
	struct timeval timeout;	/* I/O timeout for the connection */
};

/*
 * A connection to the caching daemon: the socket itself plus two kqueues
 * used to wait for readability/writability of that socket with a timeout.
 */
struct cached_connection_ {
	int sockfd;
	int read_queue;		/* kqueue watching sockfd for EVFILT_READ */
	int write_queue;	/* kqueue watching sockfd for EVFILT_WRITE */
};

/* convenience typedefs, so that callers need not write "struct" each time */
typedef struct cached_connection_ *cached_connection;
typedef struct cached_connection_ *cached_mp_write_session;
typedef struct cached_connection_ *cached_mp_read_session;

#define INVALID_CACHED_CONNECTION (NULL)
+
+/* initialization/destruction routines */
+extern cached_connection open_cached_connection__(
+ struct cached_connection_params const *);
+extern void close_cached_connection__(cached_connection);
+
+extern int cached_transform__(cached_connection, const char *, int);
+
+#endif
diff --git a/usr.sbin/nscd/parser.c b/usr.sbin/nscd/parser.c
new file mode 100644
index 0000000..b877efa
--- /dev/null
+++ b/usr.sbin/nscd/parser.c
@@ -0,0 +1,474 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include "config.h"
+#include "debug.h"
+#include "log.h"
+#include "parser.h"
+
+static void enable_cache(struct configuration *,const char *, int);
+static struct configuration_entry *find_create_entry(struct configuration *,
+ const char *);
+static int get_number(const char *, int, int);
+static enum cache_policy_t get_policy(const char *);
+static int get_yesno(const char *);
+static int check_cachename(const char *);
+static void check_files(struct configuration *, const char *, int);
+static void set_keep_hot_count(struct configuration *, const char *, int);
+static void set_negative_policy(struct configuration *, const char *,
+ enum cache_policy_t);
+static void set_negative_time_to_live(struct configuration *,
+ const char *, int);
+static void set_positive_policy(struct configuration *, const char *,
+ enum cache_policy_t);
+static void set_perform_actual_lookups(struct configuration *, const char *,
+ int);
+static void set_positive_time_to_live(struct configuration *,
+ const char *, int);
+static void set_suggested_size(struct configuration *, const char *,
+ int size);
+static void set_threads_num(struct configuration *, int);
+static int strbreak(char *, char **, int);
+
/*
 * Breaks the given string into non-empty fields separated by blanks,
 * tabs and newlines.  Pointers to at most fields_size fields are stored
 * into the fields array; the number of fields stored is returned.  The
 * input string is modified in place (strsep() inserts NUL terminators).
 */
static int
strbreak(char *str, char **fields, int fields_size)
{
	char *c = str;
	char *token;
	int num;

	TRACE_IN(strbreak);
	num = 0;
	/*
	 * The original loop bounded the iteration count by the raw token
	 * number (including empty tokens) while advancing the output
	 * cursor only for non-empty ones, and — worse — passed a NULL
	 * pointer to strsep() once the bound was reached, which strsep()
	 * dereferences.  Bound the number of *stored* fields instead.
	 */
	while (num < fields_size && (token = strsep(&c, "\n\t ")) != NULL)
		if (*token != '\0')
			fields[num++] = token;

	TRACE_OUT(strbreak);
	return (num);
}
+
+/*
+ * Tries to find the configuration entry with the specified name. If search
+ * fails, the new entry with the default parameters will be created.
+ */
+static struct configuration_entry *
+find_create_entry(struct configuration *config,
+ const char *entry_name)
+{
+ struct configuration_entry *entry = NULL;
+ int res;
+
+ TRACE_IN(find_create_entry);
+ entry = configuration_find_entry(config, entry_name);
+ if (entry == NULL) {
+ entry = create_def_configuration_entry(entry_name);
+ assert( entry != NULL);
+ res = add_configuration_entry(config, entry);
+ assert(res == 0);
+ }
+
+ TRACE_OUT(find_create_entry);
+ return (entry);
+}
+
+/*
+ * The vast majority of the functions below corresponds to the particular
+ * keywords in the configuration file.
+ */
+static void
+enable_cache(struct configuration *config, const char *entry_name, int flag)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(enable_cache);
+ entry = find_create_entry(config, entry_name);
+ entry->enabled = flag;
+ TRACE_OUT(enable_cache);
+}
+
+static void
+set_positive_time_to_live(struct configuration *config,
+ const char *entry_name, int ttl)
+{
+ struct configuration_entry *entry;
+ struct timeval lifetime;
+
+ TRACE_IN(set_positive_time_to_live);
+ assert(ttl >= 0);
+ assert(entry_name != NULL);
+ memset(&lifetime, 0, sizeof(struct timeval));
+ lifetime.tv_sec = ttl;
+
+ entry = find_create_entry(config, entry_name);
+ memcpy(&entry->positive_cache_params.max_lifetime,
+ &lifetime, sizeof(struct timeval));
+ memcpy(&entry->mp_cache_params.max_lifetime,
+ &lifetime, sizeof(struct timeval));
+
+ TRACE_OUT(set_positive_time_to_live);
+}
+
+static void
+set_negative_time_to_live(struct configuration *config,
+ const char *entry_name, int nttl)
+{
+ struct configuration_entry *entry;
+ struct timeval lifetime;
+
+ TRACE_IN(set_negative_time_to_live);
+ assert(nttl > 0);
+ assert(entry_name != NULL);
+ memset(&lifetime, 0, sizeof(struct timeval));
+ lifetime.tv_sec = nttl;
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ memcpy(&entry->negative_cache_params.max_lifetime,
+ &lifetime, sizeof(struct timeval));
+
+ TRACE_OUT(set_negative_time_to_live);
+}
+
+/*
+ * Hot count is actually the elements size limit.
+ */
+static void
+set_keep_hot_count(struct configuration *config,
+ const char *entry_name, int count)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(set_keep_hot_count);
+ assert(count >= 0);
+ assert(entry_name != NULL);
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->positive_cache_params.max_elemsize = count;
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->negative_cache_params.max_elemsize = count;
+
+ TRACE_OUT(set_keep_hot_count);
+}
+
+static void
+set_positive_policy(struct configuration *config,
+ const char *entry_name, enum cache_policy_t policy)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(set_positive_policy);
+ assert(entry_name != NULL);
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->positive_cache_params.policy = policy;
+
+ TRACE_OUT(set_positive_policy);
+}
+
+static void
+set_negative_policy(struct configuration *config,
+ const char *entry_name, enum cache_policy_t policy)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(set_negative_policy);
+ assert(entry_name != NULL);
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->negative_cache_params.policy = policy;
+
+ TRACE_OUT(set_negative_policy);
+}
+
+static void
+set_perform_actual_lookups(struct configuration *config,
+ const char *entry_name, int flag)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(set_perform_actual_lookups);
+ assert(entry_name != NULL);
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->perform_actual_lookups = flag;
+
+ TRACE_OUT(set_perform_actual_lookups);
+}
+
+static void
+set_suggested_size(struct configuration *config,
+ const char *entry_name, int size)
+{
+ struct configuration_entry *entry;
+
+ TRACE_IN(set_suggested_size);
+ assert(config != NULL);
+ assert(entry_name != NULL);
+ assert(size > 0);
+
+ entry = find_create_entry(config, entry_name);
+ assert(entry != NULL);
+ entry->positive_cache_params.cache_entries_size = size;
+ entry->negative_cache_params.cache_entries_size = size;
+
+ TRACE_OUT(set_suggested_size);
+}
+
/*
 * Handler for the "check-files" keyword.  The keyword is accepted and
 * its arguments validated, but this handler is currently a no-op: no
 * state is recorded and no file checking is performed.
 */
static void
check_files(struct configuration *config, const char *entry_name, int flag)
{

	TRACE_IN(check_files);
	assert(entry_name != NULL);
	TRACE_OUT(check_files);
}
+
/*
 * Converts a "yes"/"no" keyword argument into 1/0.
 * Returns -1 for any other string.
 */
static int
get_yesno(const char *str)
{

	if (strcmp(str, "no") == 0)
		return (0);
	if (strcmp(str, "yes") == 0)
		return (1);
	return (-1);
}
+
/*
 * Parses str as a decimal integer and checks it against the [low, max]
 * range; a bound of -1 means "unbounded" on that side.
 * Returns the parsed value, -1 if str is empty or not entirely numeric,
 * or -2 if the value is out of range (including values that do not fit
 * in an int — the original truncated the strtol() result silently).
 */
static int
get_number(const char *str, int low, int max)
{

	char *end = NULL;
	long res;

	if (str[0] == '\0')
		return (-1);

	errno = 0;
	res = strtol(str, &end, 10);
	if (*end != '\0')
		return (-1);
	if (errno == ERANGE || res < INT_MIN || res > INT_MAX)
		return (-2);	/* not representable as an int */
	if (((res >= low) || (low == -1)) &&
		((res <= max) || (max == -1)))
		return ((int)res);
	else
		return (-2);
}
+
+static enum cache_policy_t
+get_policy(const char *str)
+{
+
+ if (strcmp(str, "fifo") == 0)
+ return (CPT_FIFO);
+ else if (strcmp(str, "lru") == 0)
+ return (CPT_LRU);
+ else if (strcmp(str, "lfu") == 0)
+ return (CPT_LFU);
+
+ return (-1);
+}
+
/*
 * Validates a cachename argument: any non-empty string is acceptable.
 * Returns 0 if valid, -1 otherwise.
 */
static int
check_cachename(const char *str)
{

	assert(str != NULL);
	return (*str == '\0' ? -1 : 0);
}
+
/*
 * Handler for the "threads" keyword: records the number of worker
 * threads in the global configuration (read later when the processing
 * threads are spawned).
 */
static void
set_threads_num(struct configuration *config, int value)
{

	assert(config != NULL);
	config->threads_num = value;
}
+
+/*
+ * The main configuration routine. Its implementation is hugely inspired by the
+ * the same routine implementation in Solaris NSCD.
+ */
/*
 * The main configuration routine.  Reads the configuration file line by
 * line, breaks each line into whitespace-separated fields and dispatches
 * to the handler for the recognized keyword.  Lines starting with '#'
 * and empty lines are ignored.  On the first malformed line, parsing
 * stops, *error_str/*error_line describe the failure and -1 is returned.
 * Returns 0 on success, -1 if the file can't be opened or is malformed.
 * Its implementation is hugely inspired by the same routine
 * implementation in Solaris NSCD.
 */
int
parse_config_file(struct configuration *config,
	const char *fname, char const **error_str, int *error_line)
{
	FILE *fin;
	char buffer[255];
	char *fields[128];
	int field_count, line_num, value;
	int res;

	TRACE_IN(parse_config_file);
	assert(config != NULL);
	assert(fname != NULL);

	fin = fopen(fname, "r");
	if (fin == NULL) {
		TRACE_OUT(parse_config_file);
		return (-1);
	}

	res = 0;
	line_num = 0;
	memset(buffer, 0, sizeof(buffer));
	while ((res == 0) && (fgets(buffer, sizeof(buffer) - 1, fin) != NULL)) {
		/*
		 * BUGFIX: pass the number of elements in fields[], not its
		 * size in bytes.  The original passed sizeof(fields), which
		 * overstates the capacity by a factor of sizeof(char *) and
		 * lets strbreak() run past the end of the array.
		 */
		field_count = strbreak(buffer, fields,
		    sizeof(fields) / sizeof(fields[0]));
		++line_num;

		if (field_count == 0)
			continue;

		/*
		 * NOTE(review): get_number() returns -2 for out-of-range
		 * values, which the "!= -1" checks below let through to the
		 * handlers' asserts — confirm whether that is intended.
		 */
		switch (fields[0][0]) {
		case '#':
		case '\0':
			continue;
		case 'e':
			if ((field_count == 3) &&
			(strcmp(fields[0], "enable-cache") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				enable_cache(config, fields[1], value);
				continue;
			}
			break;
		case 'd':
			/* recognized for validation only; level not stored */
			if ((field_count == 2) &&
			(strcmp(fields[0], "debug-level") == 0) &&
			((value = get_number(fields[1], 0, 10)) != -1)) {
				continue;
			}
			break;
		case 'p':
			if ((field_count == 3) &&
			(strcmp(fields[0], "positive-time-to-live") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_positive_time_to_live(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "positive-policy") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_policy(fields[2])) != -1)) {
				set_positive_policy(config, fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "perform-actual-lookups") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				set_perform_actual_lookups(config, fields[1],
					value);
				continue;
			}
			break;
		case 'n':
			if ((field_count == 3) &&
			(strcmp(fields[0], "negative-time-to-live") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_negative_time_to_live(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "negative-policy") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_policy(fields[2])) != -1)) {
				set_negative_policy(config,
					fields[1], value);
				continue;
			}
			break;
		case 's':
			if ((field_count == 3) &&
			(strcmp(fields[0], "suggested-size") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 1, -1)) != -1)) {
				set_suggested_size(config, fields[1], value);
				continue;
			}
			break;
		case 't':
			if ((field_count == 2) &&
			(strcmp(fields[0], "threads") == 0) &&
			((value = get_number(fields[1], 1, -1)) != -1)) {
				set_threads_num(config, value);
				continue;
			}
			break;
		case 'k':
			if ((field_count == 3) &&
			(strcmp(fields[0], "keep-hot-count") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_keep_hot_count(config,
					fields[1], value);
				continue;
			}
			break;
		case 'c':
			if ((field_count == 3) &&
			(strcmp(fields[0], "check-files") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				check_files(config,
					fields[1], value);
				continue;
			}
			break;
		default:
			break;
		}

		/* no keyword matched — report and stop */
		LOG_ERR_2("config file parser", "error in file "
			"%s on line %d", fname, line_num);
		*error_str = "syntax error";
		*error_line = line_num;
		res = -1;
	}
	fclose(fin);

	TRACE_OUT(parse_config_file);
	return (res);
}
diff --git a/usr.sbin/nscd/parser.h b/usr.sbin/nscd/parser.h
new file mode 100644
index 0000000..54cc898
--- /dev/null
+++ b/usr.sbin/nscd/parser.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_PARSER_H__
+#define __CACHED_PARSER_H__
+
+extern int parse_config_file(struct configuration *,
+ const char *, char const **, int *);
+
+#endif
diff --git a/usr.sbin/nscd/protocol.c b/usr.sbin/nscd/protocol.c
new file mode 100644
index 0000000..08cea92
--- /dev/null
+++ b/usr.sbin/nscd/protocol.c
@@ -0,0 +1,550 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "debug.h"
+#include "log.h"
+#include "protocol.h"
+
+/*
+ * Initializes the comm_element with any given type of data
+ */
+void
+init_comm_element(struct comm_element *element, enum comm_element_t type)
+{
+
+ TRACE_IN(init_comm_element);
+ memset(element, 0, sizeof(struct comm_element));
+
+ switch (type) {
+ case CET_WRITE_REQUEST:
+ init_cache_write_request(&element->c_write_request);
+ break;
+ case CET_WRITE_RESPONSE:
+ init_cache_write_response(&element->c_write_response);
+ break;
+ case CET_READ_REQUEST:
+ init_cache_read_request(&element->c_read_request);
+ break;
+ case CET_READ_RESPONSE:
+ init_cache_read_response(&element->c_read_response);
+ break;
+ case CET_TRANSFORM_REQUEST:
+ init_cache_transform_request(&element->c_transform_request);
+ break;
+ case CET_TRANSFORM_RESPONSE:
+ init_cache_transform_response(&element->c_transform_response);
+ break;
+ case CET_MP_WRITE_SESSION_REQUEST:
+ init_cache_mp_write_session_request(&element->c_mp_ws_request);
+ break;
+ case CET_MP_WRITE_SESSION_RESPONSE:
+ init_cache_mp_write_session_response(&element->c_mp_ws_response);
+ break;
+ case CET_MP_WRITE_SESSION_WRITE_REQUEST:
+ init_cache_mp_write_session_write_request(
+ &element->c_mp_ws_write_request);
+ break;
+ case CET_MP_WRITE_SESSION_WRITE_RESPONSE:
+ init_cache_mp_write_session_write_response(
+ &element->c_mp_ws_write_response);
+ break;
+ case CET_MP_READ_SESSION_REQUEST:
+ init_cache_mp_read_session_request(&element->c_mp_rs_request);
+ break;
+ case CET_MP_READ_SESSION_RESPONSE:
+ init_cache_mp_read_session_response(&element->c_mp_rs_response);
+ break;
+ case CET_MP_READ_SESSION_READ_RESPONSE:
+ init_cache_mp_read_session_read_response(
+ &element->c_mp_rs_read_response);
+ break;
+ case CET_UNDEFINED:
+ break;
+ default:
+ LOG_ERR_2("init_comm_element", "invalid communication element");
+ TRACE_OUT(init_comm_element);
+ return;
+ }
+
+ element->type = type;
+ TRACE_OUT(init_comm_element);
+}
+
+void
+finalize_comm_element(struct comm_element *element)
+{
+
+ TRACE_IN(finalize_comm_element);
+ switch (element->type) {
+ case CET_WRITE_REQUEST:
+ finalize_cache_write_request(&element->c_write_request);
+ break;
+ case CET_WRITE_RESPONSE:
+ finalize_cache_write_response(&element->c_write_response);
+ break;
+ case CET_READ_REQUEST:
+ finalize_cache_read_request(&element->c_read_request);
+ break;
+ case CET_READ_RESPONSE:
+ finalize_cache_read_response(&element->c_read_response);
+ break;
+ case CET_TRANSFORM_REQUEST:
+ finalize_cache_transform_request(&element->c_transform_request);
+ break;
+ case CET_TRANSFORM_RESPONSE:
+ finalize_cache_transform_response(
+ &element->c_transform_response);
+ break;
+ case CET_MP_WRITE_SESSION_REQUEST:
+ finalize_cache_mp_write_session_request(
+ &element->c_mp_ws_request);
+ break;
+ case CET_MP_WRITE_SESSION_RESPONSE:
+ finalize_cache_mp_write_session_response(
+ &element->c_mp_ws_response);
+ break;
+ case CET_MP_WRITE_SESSION_WRITE_REQUEST:
+ finalize_cache_mp_write_session_write_request(
+ &element->c_mp_ws_write_request);
+ break;
+ case CET_MP_WRITE_SESSION_WRITE_RESPONSE:
+ finalize_cache_mp_write_session_write_response(
+ &element->c_mp_ws_write_response);
+ break;
+ case CET_MP_READ_SESSION_REQUEST:
+ finalize_cache_mp_read_session_request(
+ &element->c_mp_rs_request);
+ break;
+ case CET_MP_READ_SESSION_RESPONSE:
+ finalize_cache_mp_read_session_response(
+ &element->c_mp_rs_response);
+ break;
+ case CET_MP_READ_SESSION_READ_RESPONSE:
+ finalize_cache_mp_read_session_read_response(
+ &element->c_mp_rs_read_response);
+ break;
+ case CET_UNDEFINED:
+ break;
+ default:
+ break;
+ }
+
+ element->type = CET_UNDEFINED;
+ TRACE_OUT(finalize_comm_element);
+}
+
+void
+init_cache_write_request(struct cache_write_request *write_request)
+{
+
+ TRACE_IN(init_cache_write_request);
+ memset(write_request, 0, sizeof(struct cache_write_request));
+ TRACE_OUT(init_cache_write_request);
+}
+
+void
+finalize_cache_write_request(struct cache_write_request *write_request)
+{
+
+ TRACE_IN(finalize_cache_write_request);
+ free(write_request->entry);
+ free(write_request->cache_key);
+ free(write_request->data);
+ TRACE_OUT(finalize_cache_write_request);
+}
+
+struct cache_write_request *
+get_cache_write_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_write_request);
+ assert(element->type == CET_WRITE_REQUEST);
+ TRACE_OUT(get_cache_write_request);
+ return (&element->c_write_request);
+}
+
+void
+init_cache_write_response(struct cache_write_response *write_response)
+{
+
+ TRACE_IN(init_cache_write_response);
+ memset(write_response, 0, sizeof(struct cache_write_response));
+ TRACE_OUT(init_cache_write_response);
+}
+
+void
+finalize_cache_write_response(struct cache_write_response *write_response)
+{
+
+ TRACE_IN(finalize_cache_write_response);
+ TRACE_OUT(finalize_cache_write_response);
+}
+
+struct cache_write_response *
+get_cache_write_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_write_response);
+ assert(element->type == CET_WRITE_RESPONSE);
+ TRACE_OUT(get_cache_write_response);
+ return (&element->c_write_response);
+}
+
+void
+init_cache_read_request(struct cache_read_request *read_request)
+{
+
+ TRACE_IN(init_cache_read_request);
+ memset(read_request, 0, sizeof(struct cache_read_request));
+ TRACE_OUT(init_cache_read_request);
+}
+
+void
+finalize_cache_read_request(struct cache_read_request *read_request)
+{
+
+ TRACE_IN(finalize_cache_read_request);
+ free(read_request->entry);
+ free(read_request->cache_key);
+ TRACE_OUT(finalize_cache_read_request);
+}
+
+struct cache_read_request *
+get_cache_read_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_read_request);
+ assert(element->type == CET_READ_REQUEST);
+ TRACE_OUT(get_cache_read_request);
+ return (&element->c_read_request);
+}
+
+void
+init_cache_read_response(struct cache_read_response *read_response)
+{
+
+ TRACE_IN(init_cache_read_response);
+ memset(read_response, 0, sizeof(struct cache_read_response));
+ TRACE_OUT(init_cache_read_response);
+}
+
+void
+finalize_cache_read_response(struct cache_read_response *read_response)
+{
+
+ TRACE_IN(finalize_cache_read_response);
+ free(read_response->data);
+ TRACE_OUT(finalize_cache_read_response);
+}
+
+struct cache_read_response *
+get_cache_read_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_read_response);
+ assert(element->type == CET_READ_RESPONSE);
+ TRACE_OUT(get_cache_read_response);
+ return (&element->c_read_response);
+}
+
+void
+init_cache_transform_request(struct cache_transform_request *transform_request)
+{
+
+ TRACE_IN(init_cache_transform_request);
+ memset(transform_request, 0, sizeof(struct cache_transform_request));
+ TRACE_OUT(init_cache_transform_request);
+}
+
+void
+finalize_cache_transform_request(
+ struct cache_transform_request *transform_request)
+{
+
+ TRACE_IN(finalize_cache_transform_request);
+ free(transform_request->entry);
+ TRACE_OUT(finalize_cache_transform_request);
+}
+
+struct cache_transform_request *
+get_cache_transform_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_transform_request);
+ assert(element->type == CET_TRANSFORM_REQUEST);
+ TRACE_OUT(get_cache_transform_request);
+ return (&element->c_transform_request);
+}
+
+void
+init_cache_transform_response(
+    struct cache_transform_response *transform_response)
+{
+
+	TRACE_IN(init_cache_transform_response);
+	memset(transform_response, 0, sizeof(struct cache_transform_response));
+	TRACE_OUT(init_cache_transform_response);
+}
+
+void
+finalize_cache_transform_response(
+ struct cache_transform_response *transform_response)
+{
+
+ TRACE_IN(finalize_cache_transform_response);
+ TRACE_OUT(finalize_cache_transform_response);
+}
+
+struct cache_transform_response *
+get_cache_transform_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_transform_response);
+ assert(element->type == CET_TRANSFORM_RESPONSE);
+ TRACE_OUT(get_cache_transform_response);
+ return (&element->c_transform_response);
+}
+
+
+void
+init_cache_mp_write_session_request(
+ struct cache_mp_write_session_request *mp_ws_request)
+{
+
+ TRACE_IN(init_cache_mp_write_session_request);
+ memset(mp_ws_request, 0,
+ sizeof(struct cache_mp_write_session_request));
+ TRACE_OUT(init_cache_mp_write_session_request);
+}
+
+void
+finalize_cache_mp_write_session_request(
+ struct cache_mp_write_session_request *mp_ws_request)
+{
+
+ TRACE_IN(finalize_cache_mp_write_session_request);
+ free(mp_ws_request->entry);
+ TRACE_OUT(finalize_cache_mp_write_session_request);
+}
+
+struct cache_mp_write_session_request *
+get_cache_mp_write_session_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_write_session_request);
+ assert(element->type == CET_MP_WRITE_SESSION_REQUEST);
+ TRACE_OUT(get_cache_mp_write_session_request);
+ return (&element->c_mp_ws_request);
+}
+
+void
+init_cache_mp_write_session_response(
+ struct cache_mp_write_session_response *mp_ws_response)
+{
+
+ TRACE_IN(init_cache_mp_write_session_response);
+ memset(mp_ws_response, 0,
+ sizeof(struct cache_mp_write_session_response));
+ TRACE_OUT(init_cache_mp_write_session_response);
+}
+
+void
+finalize_cache_mp_write_session_response(
+ struct cache_mp_write_session_response *mp_ws_response)
+{
+
+ TRACE_IN(finalize_cache_mp_write_session_response);
+ TRACE_OUT(finalize_cache_mp_write_session_response);
+}
+
+struct cache_mp_write_session_response *
+get_cache_mp_write_session_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_write_session_response);
+ assert(element->type == CET_MP_WRITE_SESSION_RESPONSE);
+ TRACE_OUT(get_cache_mp_write_session_response);
+ return (&element->c_mp_ws_response);
+}
+
+void
+init_cache_mp_write_session_write_request(
+    struct cache_mp_write_session_write_request *mp_ws_write_request)
+{
+
+	TRACE_IN(init_cache_mp_write_session_write_request);
+	memset(mp_ws_write_request, 0,
+	    sizeof(struct cache_mp_write_session_write_request));
+	TRACE_OUT(init_cache_mp_write_session_write_request);
+}
+
+void
+finalize_cache_mp_write_session_write_request(
+ struct cache_mp_write_session_write_request *mp_ws_write_request)
+{
+
+ TRACE_IN(finalize_cache_mp_write_session_write_request);
+ free(mp_ws_write_request->data);
+ TRACE_OUT(finalize_cache_mp_write_session_write_request);
+}
+
+struct cache_mp_write_session_write_request *
+get_cache_mp_write_session_write_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_write_session_write_request);
+ assert(element->type == CET_MP_WRITE_SESSION_WRITE_REQUEST);
+ TRACE_OUT(get_cache_mp_write_session_write_request);
+ return (&element->c_mp_ws_write_request);
+}
+
+void
+init_cache_mp_write_session_write_response(
+ struct cache_mp_write_session_write_response *mp_ws_write_response)
+{
+
+ TRACE_IN(init_cache_mp_write_session_write_response);
+ memset(mp_ws_write_response, 0,
+ sizeof(struct cache_mp_write_session_write_response));
+ TRACE_OUT(init_cache_mp_write_session_write_response);
+}
+
+void
+finalize_cache_mp_write_session_write_response(
+ struct cache_mp_write_session_write_response *mp_ws_write_response)
+{
+
+ TRACE_IN(finalize_cache_mp_write_session_write_response);
+ TRACE_OUT(finalize_cache_mp_write_session_write_response);
+}
+
+struct cache_mp_write_session_write_response *
+get_cache_mp_write_session_write_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_write_session_write_response);
+ assert(element->type == CET_MP_WRITE_SESSION_WRITE_RESPONSE);
+ TRACE_OUT(get_cache_mp_write_session_write_response);
+ return (&element->c_mp_ws_write_response);
+}
+
+void
+init_cache_mp_read_session_request(
+ struct cache_mp_read_session_request *mp_rs_request)
+{
+
+ TRACE_IN(init_cache_mp_read_session_request);
+ memset(mp_rs_request, 0, sizeof(struct cache_mp_read_session_request));
+ TRACE_OUT(init_cache_mp_read_session_request);
+}
+
+void
+finalize_cache_mp_read_session_request(
+ struct cache_mp_read_session_request *mp_rs_request)
+{
+
+ TRACE_IN(finalize_cache_mp_read_session_request);
+ free(mp_rs_request->entry);
+ TRACE_OUT(finalize_cache_mp_read_session_request);
+}
+
+struct cache_mp_read_session_request *
+get_cache_mp_read_session_request(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_read_session_request);
+ assert(element->type == CET_MP_READ_SESSION_REQUEST);
+ TRACE_OUT(get_cache_mp_read_session_request);
+ return (&element->c_mp_rs_request);
+}
+
+void
+init_cache_mp_read_session_response(
+ struct cache_mp_read_session_response *mp_rs_response)
+{
+
+ TRACE_IN(init_cache_mp_read_session_response);
+ memset(mp_rs_response, 0,
+ sizeof(struct cache_mp_read_session_response));
+ TRACE_OUT(init_cache_mp_read_session_response);
+}
+
+void
+finalize_cache_mp_read_session_response(
+ struct cache_mp_read_session_response *mp_rs_response)
+{
+
+ TRACE_IN(finalize_cache_mp_read_session_response);
+ TRACE_OUT(finalize_cache_mp_read_session_response);
+}
+
+struct cache_mp_read_session_response *
+get_cache_mp_read_session_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_read_session_response);
+ assert(element->type == CET_MP_READ_SESSION_RESPONSE);
+ TRACE_OUT(get_cache_mp_read_session_response);
+ return (&element->c_mp_rs_response);
+}
+
+void
+init_cache_mp_read_session_read_response(
+ struct cache_mp_read_session_read_response *mp_ws_read_response)
+{
+
+ TRACE_IN(init_cache_mp_read_session_read_response);
+ memset(mp_ws_read_response, 0,
+ sizeof(struct cache_mp_read_session_read_response));
+ TRACE_OUT(init_cache_mp_read_session_read_response);
+}
+
+void
+finalize_cache_mp_read_session_read_response(
+ struct cache_mp_read_session_read_response *mp_rs_read_response)
+{
+
+ TRACE_IN(finalize_cache_mp_read_session_read_response);
+ free(mp_rs_read_response->data);
+ TRACE_OUT(finalize_cache_mp_read_session_read_response);
+}
+
+struct cache_mp_read_session_read_response *
+get_cache_mp_read_session_read_response(struct comm_element *element)
+{
+
+ TRACE_IN(get_cache_mp_read_session_read_response);
+ assert(element->type == CET_MP_READ_SESSION_READ_RESPONSE);
+ TRACE_OUT(get_cache_mp_read_session_read_response);
+ return (&element->c_mp_rs_read_response);
+}
diff --git a/usr.sbin/nscd/protocol.h b/usr.sbin/nscd/protocol.h
new file mode 100644
index 0000000..7fadbfc
--- /dev/null
+++ b/usr.sbin/nscd/protocol.h
@@ -0,0 +1,265 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_PROTOCOL_H__
+#define __CACHED_PROTOCOL_H__
+
+#include <stdlib.h>
+
+/* maximum buffer size to receive - larger buffers are not allowed */
+#define MAX_BUFFER_SIZE (1 << 20)
+
+/* buffer size correctness checking routine */
+#define BUFSIZE_CORRECT(x) (((x) > 0) && ((x) < MAX_BUFFER_SIZE))
+#define BUFSIZE_INVALID(x) (!BUFSIZE_CORRECT(x))
+
+/* structures below represent the data that are sent/received by the daemon */
+struct cache_write_request
+{
+ char *entry;
+ char *cache_key;
+ char *data;
+
+ size_t entry_length;
+ size_t cache_key_size;
+ size_t data_size;
+};
+
+struct cache_write_response
+{
+ int error_code;
+};
+
+struct cache_read_request
+{
+ char *entry;
+ char *cache_key;
+
+ size_t entry_length;
+ size_t cache_key_size;
+};
+
+struct cache_read_response
+{
+ char *data; // ignored if error_code is not 0
+ size_t data_size; // ignored if error_code is not 0
+
+ int error_code;
+};
+
+enum transformation_type {
+ TT_USER = 0, // tranform only the entries of the caller
+ TT_ALL = 1 // transform all entries
+};
+
+struct cache_transform_request
+{
+ char *entry; // ignored if entry_length is 0
+ size_t entry_length;
+
+ int transformation_type;
+};
+
+struct cache_transform_response
+{
+ int error_code;
+};
+
+struct cache_mp_write_session_request {
+ char *entry;
+ size_t entry_length;
+};
+
+struct cache_mp_write_session_response {
+ int error_code;
+};
+
+struct cache_mp_write_session_write_request {
+ char *data;
+ size_t data_size;
+};
+
+struct cache_mp_write_session_write_response {
+ int error_code;
+};
+
+struct cache_mp_read_session_request {
+ char *entry;
+ size_t entry_length;
+};
+
+struct cache_mp_read_session_response {
+ int error_code;
+};
+
+struct cache_mp_read_session_read_response {
+ char *data;
+ size_t data_size;
+
+ int error_code;
+};
+
+
+enum comm_element_t {
+ CET_UNDEFINED = 0,
+ CET_WRITE_REQUEST = 1,
+ CET_WRITE_RESPONSE = 2,
+ CET_READ_REQUEST = 3,
+ CET_READ_RESPONSE = 4,
+ CET_TRANSFORM_REQUEST = 5,
+ CET_TRANSFORM_RESPONSE = 6,
+ CET_MP_WRITE_SESSION_REQUEST = 7,
+ CET_MP_WRITE_SESSION_RESPONSE = 8,
+ CET_MP_WRITE_SESSION_WRITE_REQUEST = 9,
+ CET_MP_WRITE_SESSION_WRITE_RESPONSE = 10,
+ CET_MP_WRITE_SESSION_CLOSE_NOTIFICATION = 11,
+ CET_MP_WRITE_SESSION_ABANDON_NOTIFICATION = 12,
+ CET_MP_READ_SESSION_REQUEST = 13,
+ CET_MP_READ_SESSION_RESPONSE = 14,
+ CET_MP_READ_SESSION_READ_REQUEST = 15,
+ CET_MP_READ_SESSION_READ_RESPONSE = 16,
+ CET_MP_READ_SESSION_CLOSE_NOTIFICATION = 17,
+ CET_MAX = 18
+};
+
+/*
+ * The comm_element is used as the holder of any known (defined above) data
+ * type that is to be sent/received.
+ */
+struct comm_element
+{
+ union {
+ struct cache_write_request c_write_request;
+ struct cache_write_response c_write_response;
+ struct cache_read_request c_read_request;
+ struct cache_read_response c_read_response;
+ struct cache_transform_request c_transform_request;
+ struct cache_transform_response c_transform_response;
+
+ struct cache_mp_write_session_request c_mp_ws_request;
+ struct cache_mp_write_session_response c_mp_ws_response;
+ struct cache_mp_write_session_write_request c_mp_ws_write_request;
+ struct cache_mp_write_session_write_response c_mp_ws_write_response;
+
+ struct cache_mp_read_session_request c_mp_rs_request;
+ struct cache_mp_read_session_response c_mp_rs_response;
+ struct cache_mp_read_session_read_response c_mp_rs_read_response;
+ };
+ enum comm_element_t type;
+};
+
+extern void init_comm_element(struct comm_element *, enum comm_element_t type);
+extern void finalize_comm_element(struct comm_element *);
+
+/*
+ * For each type of data, there are three functions (init/finalize/get)
+ * that are used with the comm_element structure
+ */
+extern void init_cache_write_request(struct cache_write_request *);
+extern void finalize_cache_write_request(struct cache_write_request *);
+extern struct cache_write_request *get_cache_write_request(
+ struct comm_element *);
+
+extern void init_cache_write_response(struct cache_write_response *);
+extern void finalize_cache_write_response(struct cache_write_response *);
+extern struct cache_write_response *get_cache_write_response(
+ struct comm_element *);
+
+extern void init_cache_read_request(struct cache_read_request *);
+extern void finalize_cache_read_request(struct cache_read_request *);
+extern struct cache_read_request *get_cache_read_request(
+ struct comm_element *);
+
+extern void init_cache_read_response(struct cache_read_response *);
+extern void finalize_cache_read_response(struct cache_read_response *);
+extern struct cache_read_response *get_cache_read_response(
+ struct comm_element *);
+
+extern void init_cache_transform_request(struct cache_transform_request *);
+extern void finalize_cache_transform_request(struct cache_transform_request *);
+extern struct cache_transform_request *get_cache_transform_request(
+ struct comm_element *);
+
+extern void init_cache_transform_response(struct cache_transform_response *);
+extern void finalize_cache_transform_response(
+ struct cache_transform_response *);
+extern struct cache_transform_response *get_cache_transform_response(
+ struct comm_element *);
+
+extern void init_cache_mp_write_session_request(
+ struct cache_mp_write_session_request *);
+extern void finalize_cache_mp_write_session_request(
+ struct cache_mp_write_session_request *);
+extern struct cache_mp_write_session_request *
+ get_cache_mp_write_session_request(
+ struct comm_element *);
+
+extern void init_cache_mp_write_session_response(
+ struct cache_mp_write_session_response *);
+extern void finalize_cache_mp_write_session_response(
+ struct cache_mp_write_session_response *);
+extern struct cache_mp_write_session_response *
+ get_cache_mp_write_session_response(struct comm_element *);
+
+extern void init_cache_mp_write_session_write_request(
+ struct cache_mp_write_session_write_request *);
+extern void finalize_cache_mp_write_session_write_request(
+ struct cache_mp_write_session_write_request *);
+extern struct cache_mp_write_session_write_request *
+ get_cache_mp_write_session_write_request(struct comm_element *);
+
+extern void init_cache_mp_write_session_write_response(
+ struct cache_mp_write_session_write_response *);
+extern void finalize_cache_mp_write_session_write_response(
+ struct cache_mp_write_session_write_response *);
+extern struct cache_mp_write_session_write_response *
+ get_cache_mp_write_session_write_response(struct comm_element *);
+
+extern void init_cache_mp_read_session_request(
+ struct cache_mp_read_session_request *);
+extern void finalize_cache_mp_read_session_request(
+ struct cache_mp_read_session_request *);
+extern struct cache_mp_read_session_request *get_cache_mp_read_session_request(
+ struct comm_element *);
+
+extern void init_cache_mp_read_session_response(
+ struct cache_mp_read_session_response *);
+extern void finalize_cache_mp_read_session_response(
+ struct cache_mp_read_session_response *);
+extern struct cache_mp_read_session_response *
+ get_cache_mp_read_session_response(
+ struct comm_element *);
+
+extern void init_cache_mp_read_session_read_response(
+ struct cache_mp_read_session_read_response *);
+extern void finalize_cache_mp_read_session_read_response(
+ struct cache_mp_read_session_read_response *);
+extern struct cache_mp_read_session_read_response *
+ get_cache_mp_read_session_read_response(struct comm_element *);
+
+#endif
diff --git a/usr.sbin/nscd/query.c b/usr.sbin/nscd/query.c
new file mode 100644
index 0000000..28882c3
--- /dev/null
+++ b/usr.sbin/nscd/query.c
@@ -0,0 +1,1278 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/event.h>
+#include <assert.h>
+#include <errno.h>
+#include <nsswitch.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "config.h"
+#include "debug.h"
+#include "query.h"
+#include "log.h"
+#include "mp_ws_query.h"
+#include "mp_rs_query.h"
+#include "singletons.h"
+
+static const char negative_data[1] = { 0 };
+
+extern void get_time_func(struct timeval *);
+
+static void clear_config_entry(struct configuration_entry *);
+static void clear_config_entry_part(struct configuration_entry *,
+ const char *, size_t);
+
+static int on_query_startup(struct query_state *);
+static void on_query_destroy(struct query_state *);
+
+static int on_read_request_read1(struct query_state *);
+static int on_read_request_read2(struct query_state *);
+static int on_read_request_process(struct query_state *);
+static int on_read_response_write1(struct query_state *);
+static int on_read_response_write2(struct query_state *);
+
+static int on_rw_mapper(struct query_state *);
+
+static int on_transform_request_read1(struct query_state *);
+static int on_transform_request_read2(struct query_state *);
+static int on_transform_request_process(struct query_state *);
+static int on_transform_response_write1(struct query_state *);
+
+static int on_write_request_read1(struct query_state *);
+static int on_write_request_read2(struct query_state *);
+static int on_negative_write_request_process(struct query_state *);
+static int on_write_request_process(struct query_state *);
+static int on_write_response_write1(struct query_state *);
+
+/*
+ * Clears the specified configuration entry (clears the cache for positive
+ * and negative entries) and also for all multipart entries.
+ */
+static void
+clear_config_entry(struct configuration_entry *config_entry)
+{
+ size_t i;
+
+ TRACE_IN(clear_config_entry);
+ configuration_lock_entry(config_entry, CELT_POSITIVE);
+ if (config_entry->positive_cache_entry != NULL)
+ transform_cache_entry(
+ config_entry->positive_cache_entry,
+ CTT_CLEAR);
+ configuration_unlock_entry(config_entry, CELT_POSITIVE);
+
+ configuration_lock_entry(config_entry, CELT_NEGATIVE);
+ if (config_entry->negative_cache_entry != NULL)
+ transform_cache_entry(
+ config_entry->negative_cache_entry,
+ CTT_CLEAR);
+ configuration_unlock_entry(config_entry, CELT_NEGATIVE);
+
+ configuration_lock_entry(config_entry, CELT_MULTIPART);
+ for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
+ transform_cache_entry(
+ config_entry->mp_cache_entries[i],
+ CTT_CLEAR);
+ configuration_unlock_entry(config_entry, CELT_MULTIPART);
+
+ TRACE_OUT(clear_config_entry);
+}
+
+/*
+ * Clears the specified configuration entry by deleting only the elements,
+ * that are owned by the user with specified eid_str.
+ */
+static void
+clear_config_entry_part(struct configuration_entry *config_entry,
+ const char *eid_str, size_t eid_str_length)
+{
+ cache_entry *start, *finish, *mp_entry;
+ TRACE_IN(clear_config_entry_part);
+ configuration_lock_entry(config_entry, CELT_POSITIVE);
+ if (config_entry->positive_cache_entry != NULL)
+ transform_cache_entry_part(
+ config_entry->positive_cache_entry,
+ CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
+ configuration_unlock_entry(config_entry, CELT_POSITIVE);
+
+ configuration_lock_entry(config_entry, CELT_NEGATIVE);
+ if (config_entry->negative_cache_entry != NULL)
+ transform_cache_entry_part(
+ config_entry->negative_cache_entry,
+ CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
+ configuration_unlock_entry(config_entry, CELT_NEGATIVE);
+
+ configuration_lock_entry(config_entry, CELT_MULTIPART);
+ if (configuration_entry_find_mp_cache_entries(config_entry,
+ eid_str, &start, &finish) == 0) {
+ for (mp_entry = start; mp_entry != finish; ++mp_entry)
+ transform_cache_entry(*mp_entry, CTT_CLEAR);
+ }
+ configuration_unlock_entry(config_entry, CELT_MULTIPART);
+
+ TRACE_OUT(clear_config_entry_part);
+}
+
+/*
+ * This function is assigned to the query_state structure on its creation.
+ * Its main purpose is to receive credentials from the client.
+ */
+static int
+on_query_startup(struct query_state *qstate)
+{
+ struct msghdr cred_hdr;
+ struct iovec iov;
+ int elem_type;
+
+ struct {
+ struct cmsghdr hdr;
+ struct cmsgcred creds;
+ } cmsg;
+
+ TRACE_IN(on_query_startup);
+ assert(qstate != NULL);
+
+ memset(&cred_hdr, 0, sizeof(struct msghdr));
+ cred_hdr.msg_iov = &iov;
+ cred_hdr.msg_iovlen = 1;
+ cred_hdr.msg_control = &cmsg;
+ cred_hdr.msg_controllen = sizeof(cmsg);
+
+ memset(&iov, 0, sizeof(struct iovec));
+ iov.iov_base = &elem_type;
+ iov.iov_len = sizeof(int);
+
+ if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
+ TRACE_OUT(on_query_startup);
+ return (-1);
+ }
+
+ if (cmsg.hdr.cmsg_len != sizeof cmsg
+ || cmsg.hdr.cmsg_level != SOL_SOCKET
+ || cmsg.hdr.cmsg_type != SCM_CREDS) {
+ TRACE_OUT(on_query_startup);
+ return (-1);
+ }
+
+ qstate->uid = cmsg.creds.cmcred_uid;
+ qstate->gid = cmsg.creds.cmcred_gid;
+
+#if defined(NS_CACHED_EID_CHECKING) || defined(NS_STRICT_CACHED_EID_CHECKING)
+/*
+ * This check is probably a bit redundant - per-user cache is always separated
+ * by the euid/egid pair
+ */
+ if (check_query_eids(qstate) != 0) {
+#ifdef NS_STRICT_CACHED_EID_CHECKING
+ TRACE_OUT(on_query_startup);
+ return (-1);
+#else
+ if ((elem_type != CET_READ_REQUEST) &&
+ (elem_type != CET_MP_READ_SESSION_REQUEST) &&
+ (elem_type != CET_WRITE_REQUEST) &&
+ (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
+ TRACE_OUT(on_query_startup);
+ return (-1);
+ }
+#endif
+ }
+#endif
+
+ switch (elem_type) {
+ case CET_WRITE_REQUEST:
+ qstate->process_func = on_write_request_read1;
+ break;
+ case CET_READ_REQUEST:
+ qstate->process_func = on_read_request_read1;
+ break;
+ case CET_TRANSFORM_REQUEST:
+ qstate->process_func = on_transform_request_read1;
+ break;
+ case CET_MP_WRITE_SESSION_REQUEST:
+ qstate->process_func = on_mp_write_session_request_read1;
+ break;
+ case CET_MP_READ_SESSION_REQUEST:
+ qstate->process_func = on_mp_read_session_request_read1;
+ break;
+ default:
+ TRACE_OUT(on_query_startup);
+ return (-1);
+ }
+
+ qstate->kevent_watermark = 0;
+ TRACE_OUT(on_query_startup);
+ return (0);
+}
+
/*
 * on_rw_mapper is used to process multiple read/write requests during
 * one connection session. It's never called in the beginning (on query_state
 * creation) as it does not process the multipart requests and does not
 * receive credentials
 */
static int
on_rw_mapper(struct query_state *qstate)
{
	ssize_t result;
	int elem_type;

	TRACE_IN(on_rw_mapper);
	if (qstate->kevent_watermark == 0) {
		/* First pass: wait until the request-type int is available. */
		qstate->kevent_watermark = sizeof(int);
	} else {
		result = qstate->read_func(qstate, &elem_type, sizeof(int));
		if (result != sizeof(int)) {
			TRACE_OUT(on_rw_mapper);
			return (-1);
		}

		/* Only plain read/write requests may follow a completed one. */
		switch (elem_type) {
		case CET_WRITE_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_write_request_read1;
			break;
		case CET_READ_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_read_request_read1;
			break;
		default:
			TRACE_OUT(on_rw_mapper);
			return (-1);
			break;
		}
	}
	TRACE_OUT(on_rw_mapper);
	return (0);
}
+
+/*
+ * The default query_destroy function
+ */
+static void
+on_query_destroy(struct query_state *qstate)
+{
+
+ TRACE_IN(on_query_destroy);
+ finalize_comm_element(&qstate->response);
+ finalize_comm_element(&qstate->request);
+ TRACE_OUT(on_query_destroy);
+}
+
/*
 * The functions below are used to process write requests.
 * - on_write_request_read1 and on_write_request_read2 read the request itself
 * - on_write_request_process processes it (if the client requests to
 *  cache the negative result, the on_negative_write_request_process is used)
 * - on_write_response_write1 sends the response
 */
static int
on_write_request_read1(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the three size_t header fields. */
		qstate->kevent_watermark = sizeof(size_t) * 3;
	else {
		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
		write_request = get_cache_write_request(&qstate->request);

		result = qstate->read_func(qstate, &write_request->entry_length,
	    		sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&write_request->cache_key_size, sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&write_request->data_size, sizeof(size_t));

		if (result != sizeof(size_t) * 3) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		/*
		 * Validate the client-supplied sizes before allocating;
		 * data_size of 0 denotes a negative-result write.
		 */
		if (BUFSIZE_INVALID(write_request->entry_length) ||
			BUFSIZE_INVALID(write_request->cache_key_size) ||
			(BUFSIZE_INVALID(write_request->data_size) &&
			(write_request->data_size != 0))) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		write_request->entry = (char *)malloc(
			write_request->entry_length + 1);
		assert(write_request->entry != NULL);
		memset(write_request->entry, 0,
			write_request->entry_length + 1);

		/* The cache key is prefixed with the client's eid string. */
		write_request->cache_key = (char *)malloc(
			write_request->cache_key_size +
			qstate->eid_str_length);
		assert(write_request->cache_key != NULL);
		memcpy(write_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);
		memset(write_request->cache_key + qstate->eid_str_length, 0,
			write_request->cache_key_size);

		if (write_request->data_size != 0) {
			write_request->data = (char *)malloc(
				write_request->data_size);
			assert(write_request->data != NULL);
			memset(write_request->data, 0,
				write_request->data_size);
		}

		/* Second pass reads the variable-length payload. */
		qstate->kevent_watermark = write_request->entry_length +
			write_request->cache_key_size +
			write_request->data_size;
		qstate->process_func = on_write_request_read2;
	}

	TRACE_OUT(on_write_request_read1);
	return (0);
}
+
/*
 * Reads the variable-length part of a write request (entry name, cache key
 * and, for positive writes, the data) and selects the processing step.
 */
static int
on_write_request_read2(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	ssize_t	result;

	TRACE_IN(on_write_request_read2);
	write_request = get_cache_write_request(&qstate->request);

	result = qstate->read_func(qstate, write_request->entry,
		write_request->entry_length);
	/* The key bytes follow the already-copied eid prefix. */
	result += qstate->read_func(qstate, write_request->cache_key +
		qstate->eid_str_length, write_request->cache_key_size);
	if (write_request->data_size != 0)
		result += qstate->read_func(qstate, write_request->data,
			write_request->data_size);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_write_request_read2);
		return (-1);
	}
	/* From now on cache_key_size covers the eid prefix as well. */
	write_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	if (write_request->data_size != 0)
		qstate->process_func = on_write_request_process;
	else
		/* data_size == 0 means "cache a negative result". */
		qstate->process_func = on_negative_write_request_process;
	TRACE_OUT(on_write_request_read2);
	return (0);
}
+
/*
 * Processes a positive write request: stores the supplied data in the
 * positive cache entry of the matching configuration entry and prepares
 * the response. Errors are reported via write_response->error_code.
 */
static int
on_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("write_request", "can't find configuration"
		    " entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	/* Self-performing entries are filled by nscd itself, not clients. */
	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			write_request->data,
			write_request->data_size);
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		/* Narrow the per-query timeout if this entry defines one. */
		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));

	} else
		write_response->error_code = -1;

fin:
	/* Switch the event filter to write mode to send the response. */
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_write_request_process);
	return (0);
}
+
/*
 * Processes a negative write request (data_size == 0): stores the shared
 * negative_data marker in the negative cache entry of the matching
 * configuration entry. Mirrors on_write_request_process.
 */
static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request	*write_request;
	struct cache_write_response	*write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
		    " entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_CACHED_EID_CHECKING
		/* Negative writes require matching uid/euid and gid/egid. */
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		/* Narrow the per-query timeout if this entry defines one. */
		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
		    (qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	/* Switch the event filter to write mode to send the response. */
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}
+
/*
 * Sends the write response (the error code only) and resets the state to
 * accept the next read/write request on the same connection.
 */
static int
on_write_response_write1(struct query_state *qstate)
{
	struct cache_write_response	*write_response;
	ssize_t	result;

	TRACE_IN(on_write_response_write1);
	write_response = get_cache_write_response(&qstate->response);
	result = qstate->write_func(qstate, &write_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_write_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	/* Go back to reading and hand further requests to on_rw_mapper. */
	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;

	TRACE_OUT(on_write_response_write1);
	return (0);
}
+
/*
 * The functions below are used to process read requests.
 * - on_read_request_read1 and on_read_request_read2 read the request itself
 * - on_read_request_process processes it
 * - on_read_response_write1 and on_read_response_write2 send the response
 */
static int
on_read_request_read1(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the two size_t header fields. */
		qstate->kevent_watermark = sizeof(size_t) * 2;
	else {
		init_comm_element(&qstate->request, CET_READ_REQUEST);
		read_request = get_cache_read_request(&qstate->request);

		result = qstate->read_func(qstate,
	    		&read_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
	    		&read_request->cache_key_size, sizeof(size_t));

		if (result != sizeof(size_t) * 2) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		/* Validate the client-supplied sizes before allocating. */
		if (BUFSIZE_INVALID(read_request->entry_length) ||
			BUFSIZE_INVALID(read_request->cache_key_size)) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		read_request->entry = (char *)malloc(
			read_request->entry_length + 1);
		assert(read_request->entry != NULL);
		memset(read_request->entry, 0, read_request->entry_length + 1);

		/* The cache key is prefixed with the client's eid string. */
		read_request->cache_key = (char *)malloc(
			read_request->cache_key_size +
			qstate->eid_str_length);
		assert(read_request->cache_key != NULL);
		memcpy(read_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);
		memset(read_request->cache_key + qstate->eid_str_length, 0,
			read_request->cache_key_size);

		/* Second pass reads the variable-length payload. */
		qstate->kevent_watermark = read_request->entry_length +
			read_request->cache_key_size;
		qstate->process_func = on_read_request_read2;
	}

	TRACE_OUT(on_read_request_read1);
	return (0);
}
+
/*
 * Reads the variable-length part of a read request (entry name and cache
 * key) and advances to on_read_request_process.
 */
static int
on_read_request_read2(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t	result;

	TRACE_IN(on_read_request_read2);
	read_request = get_cache_read_request(&qstate->request);

	result = qstate->read_func(qstate, read_request->entry,
		read_request->entry_length);
	/* The key bytes follow the already-copied eid prefix. */
	result += qstate->read_func(qstate,
		read_request->cache_key + qstate->eid_str_length,
		read_request->cache_key_size);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_read_request_read2);
		return (-1);
	}
	/* From now on cache_key_size covers the eid prefix as well. */
	read_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	qstate->process_func = on_read_request_process;

	TRACE_OUT(on_read_request_read2);
	return (0);
}
+
+static int
+on_read_request_process(struct query_state *qstate)
+{
+ struct cache_read_request *read_request;
+ struct cache_read_response *read_response;
+ cache_entry c_entry, neg_c_entry;
+
+ struct agent *lookup_agent;
+ struct common_agent *c_agent;
+ int res;
+
+ TRACE_IN(on_read_request_process);
+ init_comm_element(&qstate->response, CET_READ_RESPONSE);
+ read_response = get_cache_read_response(&qstate->response);
+ read_request = get_cache_read_request(&qstate->request);
+
+ qstate->config_entry = configuration_find_entry(
+ s_configuration, read_request->entry);
+ if (qstate->config_entry == NULL) {
+ read_response->error_code = ENOENT;
+
+ LOG_ERR_2("read_request",
+ "can't find configuration "
+ "entry '%s'. aborting request", read_request->entry);
+ goto fin;
+ }
+
+ if (qstate->config_entry->enabled == 0) {
+ read_response->error_code = EACCES;
+
+ LOG_ERR_2("read_request",
+ "configuration entry '%s' is disabled",
+ read_request->entry);
+ goto fin;
+ }
+
+ /*
+ * if we perform lookups by ourselves, then we don't need to separate
+ * cache entries by euid and egid
+ */
+ if (qstate->config_entry->perform_actual_lookups != 0)
+ memset(read_request->cache_key, 0, qstate->eid_str_length);
+ else {
+#ifdef NS_CACHED_EID_CHECKING
+ if (check_query_eids(qstate) != 0) {
+ /* if the lookup is not self-performing, we check for clients euid/egid */
+ read_response->error_code = EPERM;
+ goto fin;
+ }
+#endif
+ }
+
+ configuration_lock_rdlock(s_configuration);
+ c_entry = find_cache_entry(s_cache,
+ qstate->config_entry->positive_cache_params.entry_name);
+ neg_c_entry = find_cache_entry(s_cache,
+ qstate->config_entry->negative_cache_params.entry_name);
+ configuration_unlock(s_configuration);
+ if ((c_entry != NULL) && (neg_c_entry != NULL)) {
+ configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
+ qstate->config_entry->positive_cache_entry = c_entry;
+ read_response->error_code = cache_read(c_entry,
+ read_request->cache_key,
+ read_request->cache_key_size, NULL,
+ &read_response->data_size);
+
+ if (read_response->error_code == -2) {
+ read_response->data = (char *)malloc(
+ read_response->data_size);
+ assert(read_response != NULL);
+ read_response->error_code = cache_read(c_entry,
+ read_request->cache_key,
+ read_request->cache_key_size,
+ read_response->data,
+ &read_response->data_size);
+ }
+ configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
+
+ configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
+ qstate->config_entry->negative_cache_entry = neg_c_entry;
+ if (read_response->error_code == -1) {
+ read_response->error_code = cache_read(neg_c_entry,
+ read_request->cache_key,
+ read_request->cache_key_size, NULL,
+ &read_response->data_size);
+
+ if (read_response->error_code == -2) {
+ read_response->error_code = 0;
+ read_response->data = NULL;
+ read_response->data_size = 0;
+ }
+ }
+ configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
+
+ if ((read_response->error_code == -1) &&
+ (qstate->config_entry->perform_actual_lookups != 0)) {
+ free(read_response->data);
+ read_response->data = NULL;
+ read_response->data_size = 0;
+
+ lookup_agent = find_agent(s_agent_table,
+ read_request->entry, COMMON_AGENT);
+
+ if ((lookup_agent != NULL) &&
+ (lookup_agent->type == COMMON_AGENT)) {
+ c_agent = (struct common_agent *)lookup_agent;
+ res = c_agent->lookup_func(
+ read_request->cache_key +
+ qstate->eid_str_length,
+ read_request->cache_key_size -
+ qstate->eid_str_length,
+ &read_response->data,
+ &read_response->data_size);
+
+ if (res == NS_SUCCESS) {
+ read_response->error_code = 0;
+ configuration_lock_entry(
+ qstate->config_entry,
+ CELT_POSITIVE);
+ cache_write(c_entry,
+ read_request->cache_key,
+ read_request->cache_key_size,
+ read_response->data,
+ read_response->data_size);
+ configuration_unlock_entry(
+ qstate->config_entry,
+ CELT_POSITIVE);
+ } else if ((res == NS_NOTFOUND) ||
+ (res == NS_RETURN)) {
+ configuration_lock_entry(
+ qstate->config_entry,
+ CELT_NEGATIVE);
+ cache_write(neg_c_entry,
+ read_request->cache_key,
+ read_request->cache_key_size,
+ negative_data,
+ sizeof(negative_data));
+ configuration_unlock_entry(
+ qstate->config_entry,
+ CELT_NEGATIVE);
+
+ read_response->error_code = 0;
+ read_response->data = NULL;
+ read_response->data_size = 0;
+ }
+ }
+ }
+
+ if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
+ (qstate->config_entry->common_query_timeout.tv_usec != 0))
+ memcpy(&qstate->timeout,
+ &qstate->config_entry->common_query_timeout,
+ sizeof(struct timeval));
+ } else
+ read_response->error_code = -1;
+
+fin:
+ qstate->kevent_filter = EVFILT_WRITE;
+ if (read_response->error_code == 0)
+ qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
+ else
+ qstate->kevent_watermark = sizeof(int);
+ qstate->process_func = on_read_response_write1;
+
+ TRACE_OUT(on_read_request_process);
+ return (0);
+}
+
/*
 * Sends the first part of the read response: the error code and, on
 * success, the data size. On success the data itself follows in
 * on_read_response_write2; on failure the query is finished.
 */
static int
on_read_response_write1(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write1);
	read_response = get_cache_read_response(&qstate->response);

	result = qstate->write_func(qstate, &read_response->error_code,
		sizeof(int));

	if (read_response->error_code == 0) {
		result += qstate->write_func(qstate, &read_response->data_size,
			sizeof(size_t));
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = read_response->data_size;
		qstate->process_func = on_read_response_write2;
	} else {
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		/* Error path: nothing more to send, end the query. */
		qstate->kevent_watermark = 0;
		qstate->process_func = NULL;
	}

	TRACE_OUT(on_read_response_write1);
	return (0);
}
+
/*
 * Sends the data part of a successful read response and resets the state
 * to accept the next read/write request on the same connection.
 */
static int
on_read_response_write2(struct query_state *qstate)
{
	struct cache_read_response	*read_response;
	ssize_t	result;

	TRACE_IN(on_read_response_write2);
	read_response = get_cache_read_response(&qstate->response);
	if (read_response->data_size > 0) {
		result = qstate->write_func(qstate, read_response->data,
			read_response->data_size);
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write2);
			return (-1);
		}
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	/* Go back to reading and hand further requests to on_rw_mapper. */
	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;
	TRACE_OUT(on_read_response_write2);
	return (0);
}
+
/*
 * The functions below are used to process transform requests.
 * - on_transform_request_read1 and on_transform_request_read2 read the
 *   request itself
 * - on_transform_request_process processes it
 * - on_transform_response_write1 sends the response
 */
static int
on_transform_request_read1(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read1);
	if (qstate->kevent_watermark == 0)
		/* First pass: wait for the length and transformation type. */
		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
	else {
		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
		transform_request =
			get_cache_transform_request(&qstate->request);

		result = qstate->read_func(qstate,
			&transform_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&transform_request->transformation_type, sizeof(int));

		if (result != sizeof(size_t) + sizeof(int)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if ((transform_request->transformation_type != TT_USER) &&
			(transform_request->transformation_type != TT_ALL)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		/* entry_length == 0 means "apply to all entries". */
		if (transform_request->entry_length != 0) {
			if (BUFSIZE_INVALID(transform_request->entry_length)) {
				TRACE_OUT(on_transform_request_read1);
				return (-1);
			}

			transform_request->entry = (char *)malloc(
				transform_request->entry_length + 1);
			assert(transform_request->entry != NULL);
			memset(transform_request->entry, 0,
				transform_request->entry_length + 1);

			qstate->process_func = on_transform_request_read2;
		} else
			qstate->process_func = on_transform_request_process;

		qstate->kevent_watermark = transform_request->entry_length;
	}

	TRACE_OUT(on_transform_request_read1);
	return (0);
}
+
/*
 * Reads the entry name of a transform request and advances to
 * on_transform_request_process.
 */
static int
on_transform_request_read2(struct query_state *qstate)
{
	struct cache_transform_request	*transform_request;
	ssize_t	result;

	TRACE_IN(on_transform_request_read2);
	transform_request = get_cache_transform_request(&qstate->request);

	result = qstate->read_func(qstate, transform_request->entry,
		transform_request->entry_length);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_transform_request_read2);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_request_process;

	TRACE_OUT(on_transform_request_read2);
	return (0);
}
+
+static int
+on_transform_request_process(struct query_state *qstate)
+{
+ struct cache_transform_request *transform_request;
+ struct cache_transform_response *transform_response;
+ struct configuration_entry *config_entry;
+ size_t i, size;
+
+ TRACE_IN(on_transform_request_process);
+ init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
+ transform_response = get_cache_transform_response(&qstate->response);
+ transform_request = get_cache_transform_request(&qstate->request);
+
+ switch (transform_request->transformation_type) {
+ case TT_USER:
+ if (transform_request->entry == NULL) {
+ size = configuration_get_entries_size(s_configuration);
+ for (i = 0; i < size; ++i) {
+ config_entry = configuration_get_entry(
+ s_configuration, i);
+
+ if (config_entry->perform_actual_lookups == 0)
+ clear_config_entry_part(config_entry,
+ qstate->eid_str, qstate->eid_str_length);
+ }
+ } else {
+ qstate->config_entry = configuration_find_entry(
+ s_configuration, transform_request->entry);
+
+ if (qstate->config_entry == NULL) {
+ LOG_ERR_2("transform_request",
+ "can't find configuration"
+ " entry '%s'. aborting request",
+ transform_request->entry);
+ transform_response->error_code = -1;
+ goto fin;
+ }
+
+ if (qstate->config_entry->perform_actual_lookups != 0) {
+ LOG_ERR_2("transform_request",
+ "can't transform the cache entry %s"
+ ", because it ised for actual lookups",
+ transform_request->entry);
+ transform_response->error_code = -1;
+ goto fin;
+ }
+
+ clear_config_entry_part(qstate->config_entry,
+ qstate->eid_str, qstate->eid_str_length);
+ }
+ break;
+ case TT_ALL:
+ if (qstate->euid != 0)
+ transform_response->error_code = -1;
+ else {
+ if (transform_request->entry == NULL) {
+ size = configuration_get_entries_size(
+ s_configuration);
+ for (i = 0; i < size; ++i) {
+ clear_config_entry(
+ configuration_get_entry(
+ s_configuration, i));
+ }
+ } else {
+ qstate->config_entry = configuration_find_entry(
+ s_configuration,
+ transform_request->entry);
+
+ if (qstate->config_entry == NULL) {
+ LOG_ERR_2("transform_request",
+ "can't find configuration"
+ " entry '%s'. aborting request",
+ transform_request->entry);
+ transform_response->error_code = -1;
+ goto fin;
+ }
+
+ clear_config_entry(qstate->config_entry);
+ }
+ }
+ break;
+ default:
+ transform_response->error_code = -1;
+ }
+
+fin:
+ qstate->kevent_watermark = 0;
+ qstate->process_func = on_transform_response_write1;
+ TRACE_OUT(on_transform_request_process);
+ return (0);
+}
+
/*
 * Sends the transform response (the error code only) and finishes the
 * query - transform connections are not reused.
 */
static int
on_transform_response_write1(struct query_state *qstate)
{
	struct cache_transform_response	*transform_response;
	ssize_t	result;

	TRACE_IN(on_transform_response_write1);
	transform_response = get_cache_transform_response(&qstate->response);
	result = qstate->write_func(qstate, &transform_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_transform_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	/* NULL process_func ends the query. */
	qstate->kevent_watermark = 0;
	qstate->process_func = NULL;
	TRACE_OUT(on_transform_response_write1);
	return (0);
}
+
+/*
+ * Checks if the client's euid and egid do not differ from its uid and gid.
+ * Returns 0 on success.
+ */
+int
+check_query_eids(struct query_state *qstate)
+{
+
+ return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
+}
+
+/*
+ * Uses the qstate fields to process an "alternate" read - when the buffer is
+ * too large to be received during one socket read operation
+ */
+ssize_t
+query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
+{
+ ssize_t result;
+
+ TRACE_IN(query_io_buffer_read);
+ if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
+ return (-1);
+
+ if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
+ qstate->io_buffer_p)
+ result = nbytes;
+ else
+ result = qstate->io_buffer + qstate->io_buffer_size -
+ qstate->io_buffer_p;
+
+ memcpy(buf, qstate->io_buffer_p, result);
+ qstate->io_buffer_p += result;
+
+ if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
+ free(qstate->io_buffer);
+ qstate->io_buffer = NULL;
+
+ qstate->write_func = query_socket_write;
+ qstate->read_func = query_socket_read;
+ }
+
+ TRACE_OUT(query_io_buffer_read);
+ return (result);
+}
+
+/*
+ * Uses the qstate fields to process an "alternate" write - when the buffer is
+ * too large to be sent during one socket write operation
+ */
+ssize_t
+query_io_buffer_write(struct query_state *qstate, const void *buf,
+ size_t nbytes)
+{
+ ssize_t result;
+
+ TRACE_IN(query_io_buffer_write);
+ if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
+ return (-1);
+
+ if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
+ qstate->io_buffer_p)
+ result = nbytes;
+ else
+ result = qstate->io_buffer + qstate->io_buffer_size -
+ qstate->io_buffer_p;
+
+ memcpy(qstate->io_buffer_p, buf, result);
+ qstate->io_buffer_p += result;
+
+ if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
+ qstate->use_alternate_io = 1;
+ qstate->io_buffer_p = qstate->io_buffer;
+
+ qstate->write_func = query_socket_write;
+ qstate->read_func = query_socket_read;
+ }
+
+ TRACE_OUT(query_io_buffer_write);
+ return (result);
+}
+
/*
 * The default "read" function, which reads data directly from socket.
 * Any short or failed read marks the socket as failed: the kqueue low
 * watermark should guarantee the full amount is available, so a short
 * read indicates a broken peer.
 */
ssize_t
query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_read);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_read);
		return (-1);
	}

	result = read(qstate->sockfd, buf, nbytes);
	/*
	 * NOTE(review): result < nbytes compares ssize_t against size_t;
	 * -1 is caught by the first clause and read(2) returns no other
	 * negative values, so the implicit conversion is harmless here.
	 */
	if ((result == -1) || (result < nbytes))
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_read);
	return (result);
}
+
/*
 * The default "write" function, which writes data directly to socket.
 * Any short or failed write marks the socket as failed (see the matching
 * note in query_socket_read).
 */
ssize_t
query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
{
	ssize_t	result;

	TRACE_IN(query_socket_write);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_write);
		return (-1);
	}

	result = write(qstate->sockfd, buf, nbytes);
	if ((result == -1) || (result < nbytes))
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_write);
	return (result);
}
+
+/*
+ * Initializes the query_state structure by filling it with the default values.
+ */
+struct query_state *
+init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
+{
+ struct query_state *retval;
+
+ TRACE_IN(init_query_state);
+ retval = (struct query_state *)malloc(sizeof(struct query_state));
+ assert(retval != NULL);
+ memset(retval, 0, sizeof(struct query_state));
+
+ retval->sockfd = sockfd;
+ retval->kevent_filter = EVFILT_READ;
+ retval->kevent_watermark = kevent_watermark;
+
+ retval->euid = euid;
+ retval->egid = egid;
+ retval->uid = retval->gid = -1;
+
+ if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
+ retval->egid) == -1) {
+ free(retval);
+ return (NULL);
+ }
+ retval->eid_str_length = strlen(retval->eid_str);
+
+ init_comm_element(&retval->request, CET_UNDEFINED);
+ init_comm_element(&retval->response, CET_UNDEFINED);
+ retval->process_func = on_query_startup;
+ retval->destroy_func = on_query_destroy;
+
+ retval->write_func = query_socket_write;
+ retval->read_func = query_socket_read;
+
+ get_time_func(&retval->creation_time);
+ memcpy(&retval->timeout, &s_configuration->query_timeout,
+ sizeof(struct timeval));
+
+ TRACE_OUT(init_query_state);
+ return (retval);
+}
+
/*
 * Releases all resources owned by the query state: the eid string, the
 * alternate-I/O buffer, the communication elements (via destroy_func) and
 * the structure itself. Does not close the socket descriptor.
 */
void
destroy_query_state(struct query_state *qstate)
{

	TRACE_IN(destroy_query_state);
	if (qstate->eid_str != NULL)
	    free(qstate->eid_str);

	if (qstate->io_buffer != NULL)
		free(qstate->io_buffer);

	qstate->destroy_func(qstate);
	free(qstate);
	TRACE_OUT(destroy_query_state);
}
diff --git a/usr.sbin/nscd/query.h b/usr.sbin/nscd/query.h
new file mode 100644
index 0000000..5c4e918
--- /dev/null
+++ b/usr.sbin/nscd/query.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_QUERY_H__
+#define __CACHED_QUERY_H__
+
+#include <sys/types.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "cachelib.h"
+#include "config.h"
+#include "protocol.h"
+
struct query_state;
struct configuration;
struct configuration_entry;

/* Callback types plugged into struct query_state (see below). */
typedef	int (*query_process_func)(struct query_state *);
typedef	void (*query_destroy_func)(struct query_state *);
typedef	ssize_t	(*query_read_func)(struct query_state *, void *, size_t);
typedef	ssize_t	(*query_write_func)(struct query_state *, const void *,
	size_t);

/*
 * The query state structure contains the information to process all types of
 * requests and to send all types of responses.
 */
struct query_state {
	struct timeval creation_time;	/* set on init_query_state() */
	struct timeval timeout;		/* per-query timeout */

	struct comm_element request;
	struct comm_element response;
	struct configuration_entry *config_entry;
	void	*mdata;			/* per-request private data */

	query_process_func process_func; /* called on each event */
	query_destroy_func destroy_func; /* called on destroy */

	/*
	 * By substituting these functions we can opaquely send and receive
	 * very large buffers
	 */
	query_write_func write_func; /* data write function */
	query_read_func read_func; /* data read function */

	char	*eid_str; /* the user-identifying string (euid_egid_) */
	size_t	eid_str_length;

	uid_t	euid;	/* euid of the caller, received via getpeereid */
	uid_t	uid;	/* uid of the caller, received via credentials */
	gid_t	egid;	/* egid of the caller, received via getpeereid */
	gid_t	gid;	/* gid of the caller, received via credentials */

	size_t	io_buffer_size;
	size_t	io_buffer_watermark;
	size_t	kevent_watermark;	/* bytes to be sent/received */
	int	sockfd;			/* the unix socket to read/write */
	int	kevent_filter;		/* EVFILT_READ or EVFILT_WRITE */
	int	socket_failed; /* set to 1 if the socket doesn't work correctly */

	/*
	 * These fields are used to opaquely proceed sending/receiving of
	 * the large buffers
	 */
	char	*io_buffer;
	char	*io_buffer_p;
	int	io_buffer_filter;
	int	use_alternate_io;
};
+
+extern int check_query_eids(struct query_state *);
+
+extern ssize_t query_io_buffer_read(struct query_state *, void *, size_t);
+extern ssize_t query_io_buffer_write(struct query_state *, const void *,
+ size_t);
+
+extern ssize_t query_socket_read(struct query_state *, void *, size_t);
+extern ssize_t query_socket_write(struct query_state *, const void *,
+ size_t);
+
+extern struct query_state *init_query_state(int, size_t, uid_t, gid_t);
+extern void destroy_query_state(struct query_state *);
+
+#endif
diff --git a/usr.sbin/nscd/singletons.c b/usr.sbin/nscd/singletons.c
new file mode 100644
index 0000000..669d12b
--- /dev/null
+++ b/usr.sbin/nscd/singletons.c
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "singletons.h"
+
+struct configuration *s_configuration = NULL;	/* the daemon configuration; NULL until set up elsewhere */
+cache s_cache = INVALID_CACHE;	/* process-wide cache handle (see cachelib.h) */
+struct runtime_env *s_runtime_env = NULL;	/* event-loop environment (see singletons.h) */
+struct agent_table *s_agent_table = NULL;	/* lookup-agent registry (see agent.h) */
diff --git a/usr.sbin/nscd/singletons.h b/usr.sbin/nscd/singletons.h
new file mode 100644
index 0000000..918519c
--- /dev/null
+++ b/usr.sbin/nscd/singletons.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __CACHED_SINGLETONS_H__
+#define __CACHED_SINGLETONS_H__
+
+#include "cachelib.h"
+#include "config.h"
+#include "agent.h"
+
+struct runtime_env {
+	int	queue;	/* event queue fd — NOTE(review): likely a kqueue, cf. kevent_filter in query_state; confirm */
+	int	sockfd;	/* presumably the listening unix socket — TODO confirm against nscd.c */
+	int	finished;	/* for future use */
+};
+
+extern struct configuration *s_configuration;	/* defined in singletons.c */
+extern cache s_cache;	/* defined in singletons.c */
+extern struct runtime_env *s_runtime_env;	/* defined in singletons.c */
+extern struct agent_table *s_agent_table;	/* defined in singletons.c */
+
+#endif
OpenPOWER on IntegriCloud