2 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
3 * unrestricted use provided that this legend is included on all tape
4 * media and as a part of the software program in whole or part. Users
5 * may copy or modify Sun RPC without charge, but are not authorized
6 * to license or distribute it to anyone else except as part of a product or
7 * program developed by the user.
9 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
10 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
11 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
13 * Sun RPC is provided with no support and without any obligation on the
14 * part of Sun Microsystems, Inc. to assist in its use, correction,
15 * modification or enhancement.
17 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
18 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
19 * OR ANY PART THEREOF.
21 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
22 * or profits or other special, indirect and consequential damages, even if
23 * Sun has been advised of the possibility of such damages.
25 * Sun Microsystems, Inc.
27 * Mountain View, California 94043
29 * @(#)svc_dg.c 1.17 94/04/24 SMI
30 * $NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $
31 * $FreeBSD: src/lib/libc/rpc/svc_dg.c,v 1.8 2006/02/27 22:10:59 deischen Exp $
35 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
39 * svc_dg.c, Server side for connectionless RPC.
41 * Does some caching in the hopes of achieving execute-at-most-once semantics.
44 #include "namespace.h"
45 #include "reentrant.h"
46 #include <sys/types.h>
47 #include <sys/socket.h>
49 #include <rpc/svc_dg.h>
55 #ifdef RPC_CACHE_DEBUG
56 #include <netconfig.h>
60 #include "un-namespace.h"
/* Per-transport private data: a struct svc_dg_data hangs off xp_p2. */
65 #define su_data(xprt) ((struct svc_dg_data *)(xprt->xp_p2))
/* The transport's single RPC I/O buffer is stored in xp_p1. */
66 #define rpc_buffer(xprt) ((xprt)->xp_p1)
/*
 * NOTE(review): classic double-evaluation MAX macro.  Safe in this file —
 * the only visible caller passes plain variables (sendsize/recvsize in
 * svc_dg_create) — but must not be used with side-effecting arguments.
 */
69 #define MAX(a, b) (((a) > (b)) ? (a) : (b))
/*
 * Datagram-transport method table, installed into each SVCXPRT by
 * svc_dg_ops() below.
 */
72 static void svc_dg_ops(SVCXPRT *);
73 static enum xprt_stat svc_dg_stat(SVCXPRT *);
74 static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
75 static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
76 static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
77 static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
78 static void svc_dg_destroy(SVCXPRT *);
79 static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
/* Duplicate-request ("execute-at-most-once") cache helpers. */
80 static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
81 static void cache_set(SVCXPRT *, size_t);
82 int svc_dg_enablecache(SVCXPRT *, u_int);
86 * xprt = svc_dg_create(sock, sendsize, recvsize);
87 * Does other connectionless specific initializations.
88 * Once *xprt is initialized, it is registered.
89 * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable
90 * system defaults are chosen.
91 * The routine returns NULL if a problem occurred.
/* Diagnostic message templates for svc_dg_create() failure paths (warnx). */
93 static const char svc_dg_str[] = "svc_dg_create: %s";
94 static const char svc_dg_err1[] = "could not get transport information";
95 static const char svc_dg_err2[] = " transport does not support data transfer";
96 static const char __no_mem_str[] = "out of memory";
/*
 * Build a connectionless (datagram) RPC service transport over socket fd.
 * A sendsize/recvsize of 0 selects a system default via __rpc_get_t_size().
 * Returns the new SVCXPRT, or NULL on any failure (per the comment above).
 * NOTE(review): this excerpt elides the error returns / cleanup labels that
 * sit between the visible statements; the tail (warnx + mem_free pair) is
 * the shared out-of-memory cleanup path.
 */
99 svc_dg_create(int fd, u_int sendsize, u_int recvsize)
102 struct svc_dg_data *su = NULL;
103 struct __rpc_sockinfo si;
104 struct sockaddr_storage ss;
/* Reject fds we cannot obtain address-family/protocol information for. */
107 if (!__rpc_fd2sockinfo(fd, &si)) {
108 warnx(svc_dg_str, svc_dg_err1);
/*
 * Find the receive and the send size
 */
114 sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
115 recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
116 if ((sendsize == 0) || (recvsize == 0)) {
117 warnx(svc_dg_str, svc_dg_err2);
121 xprt = mem_alloc(sizeof (SVCXPRT));
124 memset(xprt, 0, sizeof (SVCXPRT));
126 su = mem_alloc(sizeof (*su));
/* Single I/O buffer sized to the larger direction, rounded up to 4 bytes. */
129 su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
130 if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
/* One XDR stream is reused for both decode (recv) and encode (reply). */
132 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
137 xprt->xp_verf.oa_base = su->su_verfbody;
139 xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
/* Record the local (bound) address of the socket in xp_ltaddr. */
142 if (_getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
144 xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
145 xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
146 xprt->xp_ltaddr.len = slen;
147 memcpy(xprt->xp_ltaddr.buf, &ss, slen);
/* Shared allocation-failure cleanup: free whatever was obtained above. */
152 warnx(svc_dg_str, __no_mem_str);
155 mem_free(su, sizeof (*su));
156 mem_free(xprt, sizeof (SVCXPRT));
/*
 * Report the transport's status.  Body elided in this excerpt — presumably
 * a datagram transport is always idle between requests (TODO confirm).
 */
162 static enum xprt_stat
163 svc_dg_stat(SVCXPRT *xprt)
/*
 * Receive one datagram request and decode its RPC call header into msg.
 * Records the peer address in xp_rtaddr and the transaction id in su_xid.
 * If the duplicate-request cache holds a reply for this request, the cached
 * reply is retransmitted directly and the request is not redelivered.
 * NOTE(review): return statements between the visible lines are elided in
 * this excerpt.
 */
169 svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
171 struct svc_dg_data *su = su_data(xprt);
172 XDR *xdrs = &(su->su_xdrs);
174 struct sockaddr_storage ss;
180 alen = sizeof (struct sockaddr_storage);
181 rlen = _recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
182 (struct sockaddr *)(void *)&ss, &alen);
/* EINTR is handled separately from hard errors (elided branch). */
183 if (rlen == -1 && errno == EINTR)
/* A valid call message is at least 4 32-bit words long. */
185 if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
/* Grow xp_rtaddr if the sender's address doesn't fit the current buffer. */
187 if (xprt->xp_rtaddr.len < alen) {
188 if (xprt->xp_rtaddr.len != 0)
189 mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
190 xprt->xp_rtaddr.buf = mem_alloc(alen);
191 xprt->xp_rtaddr.len = alen;
193 memcpy(xprt->xp_rtaddr.buf, &ss, alen);
/* Legacy compatibility: mirror IPv4 peers into xp_raddr/xp_addrlen. */
195 if (ss.ss_family == AF_INET) {
196 xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
197 xprt->xp_addrlen = sizeof (struct sockaddr_in);
/* Decode the call header in place from the receive buffer. */
200 xdrs->x_op = XDR_DECODE;
202 if (! xdr_callmsg(xdrs, msg)) {
/* Remember the xid so svc_dg_reply() can stamp it into the reply. */
205 su->su_xid = msg->rm_xid;
/* Retransmission detected: resend the cached reply to the same peer. */
206 if (su->su_cache != NULL) {
207 if (cache_get(xprt, msg, &reply, &replylen)) {
208 _sendto(xprt->xp_fd, reply, replylen, 0,
209 (struct sockaddr *)(void *)&ss, alen);
/*
 * Encode msg as an RPC reply into the transport buffer and send it to the
 * peer recorded in xp_rtaddr.  On a fully-sent datagram, the reply is also
 * stored in the duplicate-request cache (when enabled) via cache_set().
 */
217 svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
219 struct svc_dg_data *su = su_data(xprt);
220 XDR *xdrs = &(su->su_xdrs);
/* Reuse the shared XDR stream, now in encode mode. */
224 xdrs->x_op = XDR_ENCODE;
/* Stamp the xid saved by svc_dg_recv() for this request. */
226 msg->rm_xid = su->su_xid;
227 if (xdr_replymsg(xdrs, msg)) {
228 slen = XDR_GETPOS(xdrs);
/* Success only if the whole encoded reply went out in one datagram. */
229 if (_sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
230 (struct sockaddr *)xprt->xp_rtaddr.buf,
231 (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
234 cache_set(xprt, slen);
/*
 * Decode the call arguments from the (still-positioned) receive stream
 * using the caller-supplied XDR routine.
 */
241 svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
243 return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
/*
 * Release any memory the XDR routine allocated while decoding the
 * arguments, by re-running it in XDR_FREE mode.
 */
247 svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
249 XDR *xdrs = &(su_data(xprt)->su_xdrs);
251 xdrs->x_op = XDR_FREE;
252 return (*xdr_args)(xdrs, args_ptr);
/*
 * Tear down a datagram transport: unregister it, destroy the XDR stream,
 * and free the I/O buffer, the private data, the cached addresses, and
 * finally the SVCXPRT itself.  (The fd close between lines 261 and 263 is
 * elided in this excerpt.)
 */
256 svc_dg_destroy(SVCXPRT *xprt)
258 struct svc_dg_data *su = su_data(xprt);
260 xprt_unregister(xprt);
261 if (xprt->xp_fd != -1)
263 XDR_DESTROY(&(su->su_xdrs));
264 mem_free(rpc_buffer(xprt), su->su_iosz);
265 mem_free(su, sizeof (*su));
/* Address buffers are freed with their maxlen (the allocated size). */
266 if (xprt->xp_rtaddr.buf)
267 mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
268 if (xprt->xp_ltaddr.buf)
269 mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
272 mem_free(xprt, sizeof (SVCXPRT));
/*
 * Transport control operation hook (xp_control); body elided in this
 * excerpt — presumably handles no requests for this transport (TODO
 * confirm against the full source).
 */
277 svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
/*
 * Install the datagram method tables into xprt.  The static ops/ops2
 * structures are lazily initialized exactly once, guarded by ops_lock
 * (first caller wins; xp_recv == NULL is the "not yet initialized" test).
 */
283 svc_dg_ops(SVCXPRT *xprt)
285 static struct xp_ops ops;
286 static struct xp_ops2 ops2;
288 /* VARIABLES PROTECTED BY ops_lock: ops */
290 mutex_lock(&ops_lock);
291 if (ops.xp_recv == NULL) {
292 ops.xp_recv = svc_dg_recv;
293 ops.xp_stat = svc_dg_stat;
294 ops.xp_getargs = svc_dg_getargs;
295 ops.xp_reply = svc_dg_reply;
296 ops.xp_freeargs = svc_dg_freeargs;
297 ops.xp_destroy = svc_dg_destroy;
298 ops2.xp_control = svc_dg_control;
301 xprt->xp_ops2 = &ops2;
302 mutex_unlock(&ops_lock);
305 /* The CACHING COMPONENT */
308 * Could have been a separate file, but some part of it depends upon the
309 * private structure of the client handle.
311 * Fifo cache for cl server
312 * Copies pointers to reply buffers into fifo cache
313 * Buffers are sent again if retransmissions are detected.
/* Hash table has SPARSENESS * size buckets, so it stays mostly empty. */
316 #define SPARSENESS 4 /* 75% sparse */
/* Typed allocation helpers for the cache (wrap mem_alloc/mem_free). */
318 #define ALLOC(type, size) \
319 (type *) mem_alloc((sizeof (type) * (size)))
321 #define MEMZERO(addr, type, size) \
322 memset((void *) (addr), 0, sizeof (type) * (int) (size))
324 #define FREE(addr, type, size) \
325 mem_free((addr), (sizeof (type) * (size)))
/*
 * An entry in the cache
 * (the struct cache_node opening lines are elided in this excerpt).
 */
330 typedef struct cache_node *cache_ptr;
/*
 * Index into cache is xid, proc, vers, prog and address
 */
336 rpcproc_t cache_proc;
337 rpcvers_t cache_vers;
338 rpcprog_t cache_prog;
339 struct netbuf cache_addr;
/*
 * The cached reply and length
 */
344 size_t cache_replylen;
/*
 * Next node on the list, if there is a collision
 */
348 cache_ptr cache_next;
/* Fields of struct cl_cache, the per-transport duplicate-request cache
 * (struct header elided in this excerpt).  uc_prog/uc_vers/uc_proc hold
 * the parameters of the most recent cache_get() miss, consumed later by
 * cache_set() — see the comment above cache_set(). */
355 u_int uc_size; /* size of cache */
356 cache_ptr *uc_entries; /* hash table of entries in cache */
357 cache_ptr *uc_fifo; /* fifo list of entries in cache */
358 u_int uc_nextvictim; /* points to next victim in fifo list */
359 rpcprog_t uc_prog; /* saved program number */
360 rpcvers_t uc_vers; /* saved version number */
361 rpcproc_t uc_proc; /* saved procedure number */
/*
 * the hashing function: bucket index is the xid modulo the (sparse)
 * table size.  Assumes transp's su_cache is non-NULL.
 */
368 #define CACHE_LOC(transp, xid) \
369 (xid % (SPARSENESS * ((struct cl_cache *) \
370 su_data(transp)->su_cache)->uc_size))
373 * Enable use of the cache. Returns 1 on success, 0 on failure.
374 * Note: there is no disable.
/* Diagnostic message templates for svc_dg_enablecache() failure paths. */
376 static const char cache_enable_str[] = "svc_enablecache: %s %s";
377 static const char alloc_err[] = "could not allocate cache ";
378 static const char enable_err[] = "cache already enabled";
/*
 * Enable the duplicate-request cache on transp with room for `size`
 * replies.  Allocates the cl_cache control block, a hash table of
 * size * SPARSENESS buckets, and a fifo of `size` victim slots.
 * Per the comment above: returns 1 on success, 0 on failure, and there
 * is no way to disable the cache afterwards.  All cache state is guarded
 * by dupreq_lock.  (The return statements are elided in this excerpt.)
 */
381 svc_dg_enablecache(SVCXPRT *transp, u_int size)
383 struct svc_dg_data *su = su_data(transp);
386 mutex_lock(&dupreq_lock);
/* Enabling twice is an error, not a resize. */
387 if (su->su_cache != NULL) {
388 warnx(cache_enable_str, enable_err, " ");
389 mutex_unlock(&dupreq_lock);
392 uc = ALLOC(struct cl_cache, 1);
394 warnx(cache_enable_str, alloc_err, " ");
395 mutex_unlock(&dupreq_lock);
399 uc->uc_nextvictim = 0;
400 uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
401 if (uc->uc_entries == NULL) {
402 warnx(cache_enable_str, alloc_err, "data");
403 FREE(uc, struct cl_cache, 1);
404 mutex_unlock(&dupreq_lock);
407 MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
408 uc->uc_fifo = ALLOC(cache_ptr, size);
409 if (uc->uc_fifo == NULL) {
410 warnx(cache_enable_str, alloc_err, "fifo");
/* Unwind in reverse order of allocation. */
411 FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
412 FREE(uc, struct cl_cache, 1);
413 mutex_unlock(&dupreq_lock);
416 MEMZERO(uc->uc_fifo, cache_ptr, size);
417 su->su_cache = (char *)(void *)uc;
418 mutex_unlock(&dupreq_lock);
423 * Set an entry in the cache. It assumes that the uc entry is set from
424 * the earlier call to cache_get() for the same procedure. This will always
425 * happen because cache_get() is called by svc_dg_recv and cache_set() is called
426 * by svc_dg_reply(). All this hoopla because the right RPC parameters are
427 * not available at svc_dg_reply time.
/* Diagnostic message templates for cache_set() failure paths. */
430 static const char cache_set_str[] = "cache_set: %s";
431 static const char cache_set_err1[] = "victim not found";
432 static const char cache_set_err2[] = "victim alloc failed";
433 static const char cache_set_err3[] = "could not allocate new rpc buffer";
/*
 * Store the reply just sent (replylen bytes in rpc_buffer(xprt)) into the
 * duplicate-request cache.  The entry is keyed by xid plus the prog/vers/
 * proc values saved by the preceding cache_get() call (see the comment
 * above).  Rather than copying the reply, the current rpc buffer is handed
 * to the cache entry and a fresh buffer is installed on the transport.
 * Eviction is FIFO via uc_fifo/uc_nextvictim.  Guarded by dupreq_lock.
 */
436 cache_set(SVCXPRT *xprt, size_t replylen)
440 struct svc_dg_data *su = su_data(xprt);
441 struct cl_cache *uc = (struct cl_cache *) su->su_cache;
444 #ifdef RPC_CACHE_DEBUG
445 struct netconfig *nconf;
449 mutex_lock(&dupreq_lock);
/*
 * Find space for the new entry, either by
 * reusing an old entry, or by mallocing a new one
 */
454 victim = uc->uc_fifo[uc->uc_nextvictim];
455 if (victim != NULL) {
/* Unlink the victim from its hash chain so its node can be reused. */
456 loc = CACHE_LOC(xprt, victim->cache_xid);
457 for (vicp = &uc->uc_entries[loc];
458 *vicp != NULL && *vicp != victim;
459 vicp = &(*vicp)->cache_next)
/* Victim in fifo but missing from its chain: internal inconsistency. */
462 warnx(cache_set_str, cache_set_err1);
463 mutex_unlock(&dupreq_lock);
466 *vicp = victim->cache_next; /* remove from cache */
/* Recycle the victim's reply buffer as the transport's next buffer. */
467 newbuf = victim->cache_reply;
469 victim = ALLOC(struct cache_node, 1);
470 if (victim == NULL) {
471 warnx(cache_set_str, cache_set_err2);
472 mutex_unlock(&dupreq_lock);
475 newbuf = mem_alloc(su->su_iosz);
476 if (newbuf == NULL) {
477 warnx(cache_set_str, cache_set_err3);
478 FREE(victim, struct cache_node, 1);
479 mutex_unlock(&dupreq_lock);
487 #ifdef RPC_CACHE_DEBUG
488 if (nconf = getnetconfigent(xprt->xp_netid)) {
489 uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
490 freenetconfigent(nconf);
492 "cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
493 su->su_xid, uc->uc_prog, uc->uc_vers,
/* Swap buffers: the sent reply stays cached; newbuf becomes current. */
498 victim->cache_replylen = replylen;
499 victim->cache_reply = rpc_buffer(xprt);
500 rpc_buffer(xprt) = newbuf;
501 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
502 su->su_iosz, XDR_ENCODE);
/* Key the entry with the request identity saved by cache_get(). */
503 victim->cache_xid = su->su_xid;
504 victim->cache_proc = uc->uc_proc;
505 victim->cache_vers = uc->uc_vers;
506 victim->cache_prog = uc->uc_prog;
/* Deep-copy the peer address; xp_rtaddr is reused on the next recv. */
507 victim->cache_addr = xprt->xp_rtaddr;
508 victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
509 memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
510 (size_t)xprt->xp_rtaddr.len);
/* Insert at the head of the hash chain and into the fifo ring. */
511 loc = CACHE_LOC(xprt, victim->cache_xid);
512 victim->cache_next = uc->uc_entries[loc];
513 uc->uc_entries[loc] = victim;
514 uc->uc_fifo[uc->uc_nextvictim++] = victim;
515 uc->uc_nextvictim %= uc->uc_size;
516 mutex_unlock(&dupreq_lock);
520 * Try to get an entry from the cache
521 * return 1 if found, 0 if not found and set the stage for cache_set()
524 cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
528 struct svc_dg_data *su = su_data(xprt);
529 struct cl_cache *uc = (struct cl_cache *) su->su_cache;
530 #ifdef RPC_CACHE_DEBUG
531 struct netconfig *nconf;
535 mutex_lock(&dupreq_lock);
536 loc = CACHE_LOC(xprt, su->su_xid);
537 for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
538 if (ent->cache_xid == su->su_xid &&
539 ent->cache_proc == msg->rm_call.cb_proc &&
540 ent->cache_vers == msg->rm_call.cb_vers &&
541 ent->cache_prog == msg->rm_call.cb_prog &&
542 ent->cache_addr.len == xprt->xp_rtaddr.len &&
543 (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
544 xprt->xp_rtaddr.len) == 0)) {
545 #ifdef RPC_CACHE_DEBUG
546 if (nconf = getnetconfigent(xprt->xp_netid)) {
547 uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
548 freenetconfigent(nconf);
550 "cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
551 su->su_xid, msg->rm_call.cb_prog,
552 msg->rm_call.cb_vers,
553 msg->rm_call.cb_proc, uaddr);
557 *replyp = ent->cache_reply;
558 *replylenp = ent->cache_replylen;
559 mutex_unlock(&dupreq_lock);
564 * Failed to find entry
565 * Remember a few things so we can do a set later
567 uc->uc_proc = msg->rm_call.cb_proc;
568 uc->uc_vers = msg->rm_call.cb_vers;
569 uc->uc_prog = msg->rm_call.cb_prog;
570 mutex_unlock(&dupreq_lock);