/* amq_subr.c revision 310490 */
/*
 * Copyright (c) 1997-2014 Erez Zadok
 * Copyright (c) 1990 Jan-Simon Pendry
 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 * 35 * 36 * File: am-utils/amd/amq_subr.c 37 * 38 */ 39/* 40 * Auxiliary routines for amq tool 41 */ 42 43#ifdef HAVE_CONFIG_H 44# include <config.h> 45#endif /* HAVE_CONFIG_H */ 46#include <am_defs.h> 47#include <amd.h> 48 49/* forward definitions */ 50bool_t xdr_amq_mount_tree_node(XDR *xdrs, amq_mount_tree *objp); 51bool_t xdr_amq_mount_subtree(XDR *xdrs, amq_mount_tree *objp); 52 53 54voidp 55amqproc_null_1_svc(voidp argp, struct svc_req *rqstp) 56{ 57 static char res; 58 59 return (voidp) &res; 60} 61 62 63/* 64 * Return a sub-tree of mounts 65 */ 66amq_mount_tree_p * 67amqproc_mnttree_1_svc(voidp argp, struct svc_req *rqstp) 68{ 69 static am_node *mp; 70 71 mp = find_ap(*(char **) argp); 72 return (amq_mount_tree_p *) ((void *)&mp); 73} 74 75 76/* 77 * Unmount a single node 78 */ 79int * 80amqproc_umnt_1_svc(voidp argp, struct svc_req *rqstp) 81{ 82 static int res = AMQ_UMNT_OK; 83 am_node *mp = find_ap(*(char **) argp); 84 85 if (mp) 86 forcibly_timeout_mp(mp); 87 88 return &res; 89} 90 91 92/* 93 * Synchronously unmount a single node - parent side. 94 */ 95int * 96amqproc_sync_umnt_1_svc_parent(voidp argp, struct svc_req *rqstp) 97{ 98 amqproc_umnt_1_svc(argp, rqstp); 99 return NULL; 100} 101 102 103/* 104 * Synchronously unmount a single node - child side. 105 */ 106amq_sync_umnt * 107amqproc_sync_umnt_1_svc_child(voidp argp, struct svc_req *rqstp) 108{ 109 static amq_sync_umnt rv; 110 amq_sync_umnt buf; 111 ssize_t n; 112 113 am_node *mp = find_ap(*(char **) argp); 114 115 memset(&rv, 0, sizeof(rv)); 116 rv.au_etype = AMQ_UMNT_READ; 117 if (mp && mp->am_fd[0] >= 0) { 118 n = read(mp->am_fd[0], &buf, sizeof(buf)); 119 if (n == sizeof(buf)) 120 rv = buf; 121 } 122 return &rv; 123} 124 125 126/* 127 * Synchronously unmount a single node - use if we can't fork (asynchronous). 
128 */ 129amq_sync_umnt * 130amqproc_sync_umnt_1_svc_async(voidp argp, struct svc_req *rqstp) 131{ 132 static amq_sync_umnt rv; 133 134 memset(&rv, 0, sizeof(rv)); 135 rv.au_etype = AMQ_UMNT_FORK; 136 rv.au_errno = errno; 137 138 amqproc_umnt_1_svc(argp, rqstp); 139 140 return &rv; 141} 142 143 144/* 145 * Return global statistics 146 */ 147amq_mount_stats * 148amqproc_stats_1_svc(voidp argp, struct svc_req *rqstp) 149{ 150 return (amq_mount_stats *) ((void *)&amd_stats); 151} 152 153 154/* 155 * Return the entire tree of mount nodes 156 */ 157amq_mount_tree_list * 158amqproc_export_1_svc(voidp argp, struct svc_req *rqstp) 159{ 160 static amq_mount_tree_list aml; 161 static am_node *mp; 162 163 mp = get_exported_ap(0); 164 aml.amq_mount_tree_list_val = (amq_mount_tree_p *) ((void *) &mp); 165 aml.amq_mount_tree_list_len = 1; /* XXX */ 166 167 return &aml; 168} 169 170 171int * 172amqproc_setopt_1_svc(voidp argp, struct svc_req *rqstp) 173{ 174 static int rc; 175 amq_setopt *opt = (amq_setopt *) argp; 176 177 rc = 0; 178 179 switch (opt->as_opt) { 180 181 case AMOPT_DEBUG: 182 if (debug_option(opt->as_str)) 183 rc = EINVAL; 184 break; 185 186 case AMOPT_LOGFILE: 187 if (gopt.logfile && opt->as_str 188 && STREQ(gopt.logfile, opt->as_str)) { 189 if (switch_to_logfile(opt->as_str, orig_umask, 0)) 190 rc = EINVAL; 191 } else { 192 rc = EACCES; 193 } 194 break; 195 196 case AMOPT_XLOG: 197 if (switch_option(opt->as_str)) 198 rc = EINVAL; 199 break; 200 201 case AMOPT_FLUSHMAPC: 202 if (amd_state == Run) { 203 plog(XLOG_INFO, "amq says flush cache"); 204 do_mapc_reload = 0; 205 flush_nfs_fhandle_cache((fserver *) NULL); 206 flush_srvr_nfs_cache((fserver *) NULL); 207 } 208 break; 209 } 210 211 return &rc; 212} 213 214 215amq_mount_info_list * 216amqproc_getmntfs_1_svc(voidp argp, struct svc_req *rqstp) 217{ 218 return (amq_mount_info_list *) ((void *)&mfhead); /* XXX */ 219} 220 221extern qelem map_list_head; 222amq_map_info_list * 223amqproc_getmapinfo_1_svc(voidp argp, 
struct svc_req *rqstp) 224{ 225 return (amq_map_info_list *) ((void *)&map_list_head); /* XXX */ 226} 227 228amq_string * 229amqproc_getvers_1_svc(voidp argp, struct svc_req *rqstp) 230{ 231 static amq_string res; 232 233 res = get_version_string(); 234 return &res; 235} 236 237 238/* get PID of remote amd */ 239int * 240amqproc_getpid_1_svc(voidp argp, struct svc_req *rqstp) 241{ 242 static int res; 243 244 res = getpid(); 245 return &res; 246} 247 248 249/* 250 * Process PAWD string of remote pawd tool. 251 * 252 * We repeat the resolution of the string until the resolved string resolves 253 * to itself. This ensures that we follow path resolutions through all 254 * possible Amd mount points until we reach some sort of convergence. To 255 * prevent possible infinite loops, we break out of this loop if the strings 256 * do not converge after MAX_PAWD_TRIES times. 257 */ 258amq_string * 259amqproc_pawd_1_svc(voidp argp, struct svc_req *rqstp) 260{ 261 static amq_string res; 262#define MAX_PAWD_TRIES 10 263 int index, len, maxagain = MAX_PAWD_TRIES; 264 am_node *mp; 265 char *mountpoint; 266 char *dir = *(char **) argp; 267 static char tmp_buf[MAXPATHLEN]; 268 char prev_buf[MAXPATHLEN]; 269 270 tmp_buf[0] = prev_buf[0] = '\0'; /* default is empty string: no match */ 271 do { 272 for (mp = get_first_exported_ap(&index); 273 mp; 274 mp = get_next_exported_ap(&index)) { 275 if (STREQ(mp->am_al->al_mnt->mf_ops->fs_type, "toplvl")) 276 continue; 277 if (STREQ(mp->am_al->al_mnt->mf_ops->fs_type, "auto")) 278 continue; 279 mountpoint = (mp->am_link ? 
mp->am_link : mp->am_al->al_mnt->mf_mount); 280 len = strlen(mountpoint); 281 if (len == 0) 282 continue; 283 if (!NSTREQ(mountpoint, dir, len)) 284 continue; 285 if (dir[len] != '\0' && dir[len] != '/') 286 continue; 287 xstrlcpy(tmp_buf, mp->am_path, sizeof(tmp_buf)); 288 xstrlcat(tmp_buf, &dir[len], sizeof(tmp_buf)); 289 break; 290 } /* end of "for" loop */ 291 /* once tmp_buf and prev_buf are equal, break out of "do" loop */ 292 if (STREQ(tmp_buf, prev_buf)) 293 break; 294 else 295 xstrlcpy(prev_buf, tmp_buf, sizeof(prev_buf)); 296 } while (--maxagain); 297 /* check if we couldn't resolve the string after MAX_PAWD_TRIES times */ 298 if (maxagain <= 0) 299 plog(XLOG_WARNING, "path \"%s\" did not resolve after %d tries", 300 tmp_buf, MAX_PAWD_TRIES); 301 302 res = tmp_buf; 303 return &res; 304} 305 306 307/* 308 * XDR routines. 309 */ 310 311 312bool_t 313xdr_amq_setopt(XDR *xdrs, amq_setopt *objp) 314{ 315 if (!xdr_enum(xdrs, (enum_t *) ((voidp) &objp->as_opt))) { 316 return (FALSE); 317 } 318 if (!xdr_string(xdrs, &objp->as_str, AMQ_STRLEN)) { 319 return (FALSE); 320 } 321 return (TRUE); 322} 323 324 325/* 326 * More XDR routines - Should be used for OUTPUT ONLY. 327 */ 328bool_t 329xdr_amq_mount_tree_node(XDR *xdrs, amq_mount_tree *objp) 330{ 331 am_node *mp = (am_node *) objp; 332 long mtime; 333 334 if (!xdr_amq_string(xdrs, &mp->am_al->al_mnt->mf_info)) { 335 return (FALSE); 336 } 337 if (!xdr_amq_string(xdrs, &mp->am_path)) { 338 return (FALSE); 339 } 340 if (!xdr_amq_string(xdrs, mp->am_link ? 
&mp->am_link : &mp->am_al->al_mnt->mf_mount)) { 341 return (FALSE); 342 } 343 if (!xdr_amq_string(xdrs, &mp->am_al->al_mnt->mf_ops->fs_type)) { 344 return (FALSE); 345 } 346 mtime = mp->am_stats.s_mtime; 347 if (!xdr_long(xdrs, &mtime)) { 348 return (FALSE); 349 } 350 if (!xdr_u_short(xdrs, &mp->am_stats.s_uid)) { 351 return (FALSE); 352 } 353 if (!xdr_int(xdrs, &mp->am_stats.s_getattr)) { 354 return (FALSE); 355 } 356 if (!xdr_int(xdrs, &mp->am_stats.s_lookup)) { 357 return (FALSE); 358 } 359 if (!xdr_int(xdrs, &mp->am_stats.s_readdir)) { 360 return (FALSE); 361 } 362 if (!xdr_int(xdrs, &mp->am_stats.s_readlink)) { 363 return (FALSE); 364 } 365 if (!xdr_int(xdrs, &mp->am_stats.s_statfs)) { 366 return (FALSE); 367 } 368 return (TRUE); 369} 370 371 372bool_t 373xdr_amq_mount_subtree(XDR *xdrs, amq_mount_tree *objp) 374{ 375 am_node *mp = (am_node *) objp; 376 377 if (!xdr_amq_mount_tree_node(xdrs, objp)) { 378 return (FALSE); 379 } 380 if (!xdr_pointer(xdrs, 381 (char **) ((voidp) &mp->am_osib), 382 sizeof(amq_mount_tree), 383 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) { 384 return (FALSE); 385 } 386 if (!xdr_pointer(xdrs, 387 (char **) ((voidp) &mp->am_child), 388 sizeof(amq_mount_tree), 389 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) { 390 return (FALSE); 391 } 392 return (TRUE); 393} 394 395 396bool_t 397xdr_amq_mount_tree(XDR *xdrs, amq_mount_tree *objp) 398{ 399 am_node *mp = (am_node *) objp; 400 am_node *mnil = NULL; 401 402 if (!xdr_amq_mount_tree_node(xdrs, objp)) { 403 return (FALSE); 404 } 405 if (!xdr_pointer(xdrs, 406 (char **) ((voidp) &mnil), 407 sizeof(amq_mount_tree), 408 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) { 409 return (FALSE); 410 } 411 if (!xdr_pointer(xdrs, 412 (char **) ((voidp) &mp->am_child), 413 sizeof(amq_mount_tree), 414 (XDRPROC_T_TYPE) xdr_amq_mount_subtree)) { 415 return (FALSE); 416 } 417 return (TRUE); 418} 419 420 421bool_t 422xdr_amq_mount_tree_p(XDR *xdrs, amq_mount_tree_p *objp) 423{ 424 if (!xdr_pointer(xdrs, (char **) objp, 
sizeof(amq_mount_tree), (XDRPROC_T_TYPE) xdr_amq_mount_tree)) { 425 return (FALSE); 426 } 427 return (TRUE); 428} 429 430 431bool_t 432xdr_amq_mount_stats(XDR *xdrs, amq_mount_stats *objp) 433{ 434 if (!xdr_int(xdrs, &objp->as_drops)) { 435 return (FALSE); 436 } 437 if (!xdr_int(xdrs, &objp->as_stale)) { 438 return (FALSE); 439 } 440 if (!xdr_int(xdrs, &objp->as_mok)) { 441 return (FALSE); 442 } 443 if (!xdr_int(xdrs, &objp->as_merr)) { 444 return (FALSE); 445 } 446 if (!xdr_int(xdrs, &objp->as_uerr)) { 447 return (FALSE); 448 } 449 return (TRUE); 450} 451 452 453 454bool_t 455xdr_amq_mount_tree_list(XDR *xdrs, amq_mount_tree_list *objp) 456{ 457 if (!xdr_array(xdrs, 458 (char **) ((voidp) &objp->amq_mount_tree_list_val), 459 (u_int *) &objp->amq_mount_tree_list_len, 460 ~0, 461 sizeof(amq_mount_tree_p), 462 (XDRPROC_T_TYPE) xdr_amq_mount_tree_p)) { 463 return (FALSE); 464 } 465 return (TRUE); 466} 467 468 469bool_t 470xdr_amq_mount_info_qelem(XDR *xdrs, qelem *qhead) 471{ 472 mntfs *mf; 473 u_int len = 0; 474 475 /* 476 * Compute length of list 477 */ 478 for (mf = AM_LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) { 479 if (!(mf->mf_fsflags & FS_AMQINFO)) 480 continue; 481 len++; 482 } 483 xdr_u_int(xdrs, &len); 484 485 /* 486 * Send individual data items 487 */ 488 for (mf = AM_LAST(mntfs, qhead); mf != HEAD(mntfs, qhead); mf = PREV(mntfs, mf)) { 489 int up; 490 if (!(mf->mf_fsflags & FS_AMQINFO)) 491 continue; 492 493 if (!xdr_amq_string(xdrs, &mf->mf_ops->fs_type)) { 494 return (FALSE); 495 } 496 if (!xdr_amq_string(xdrs, &mf->mf_mount)) { 497 return (FALSE); 498 } 499 if (!xdr_amq_string(xdrs, &mf->mf_info)) { 500 return (FALSE); 501 } 502 if (!xdr_amq_string(xdrs, &mf->mf_server->fs_host)) { 503 return (FALSE); 504 } 505 if (!xdr_int(xdrs, &mf->mf_error)) { 506 return (FALSE); 507 } 508 if (!xdr_int(xdrs, &mf->mf_refc)) { 509 return (FALSE); 510 } 511 if (FSRV_ERROR(mf->mf_server) || FSRV_ISDOWN(mf->mf_server)) 512 up = 0; 513 else if 
(FSRV_ISUP(mf->mf_server)) 514 up = 1; 515 else 516 up = -1; 517 if (!xdr_int(xdrs, &up)) { 518 return (FALSE); 519 } 520 } 521 return (TRUE); 522} 523 524bool_t 525xdr_amq_map_info_qelem(XDR *xdrs, qelem *qhead) 526{ 527 mnt_map *m; 528 u_int len = 0; 529 int x; 530 char *n; 531 532 /* 533 * Compute length of list 534 */ 535 ITER(m, mnt_map, qhead) { 536 len++; 537 } 538 539 if (!xdr_u_int(xdrs, &len)) 540 return (FALSE); 541 542 /* 543 * Send individual data items 544 */ 545 ITER(m, mnt_map, qhead) { 546 if (!xdr_amq_string(xdrs, &m->map_name)) { 547 return (FALSE); 548 } 549 550 n = m->wildcard ? m->wildcard : ""; 551 if (!xdr_amq_string(xdrs, &n)) { 552 return (FALSE); 553 } 554 555 if (!xdr_long(xdrs, (long *) &m->modify)) { 556 return (FALSE); 557 } 558 559 x = m->flags; 560 if (!xdr_int(xdrs, &x)) { 561 return (FALSE); 562 } 563 564 x = m->nentries; 565 if (!xdr_int(xdrs, &x)) { 566 return (FALSE); 567 } 568 569 x = m->reloads; 570 if (!xdr_int(xdrs, &x)) { 571 return (FALSE); 572 } 573 574 if (!xdr_int(xdrs, &m->refc)) { 575 return (FALSE); 576 } 577 578 if (m->isup) 579 x = (*m->isup)(m, m->map_name); 580 else 581 x = -1; 582 if (!xdr_int(xdrs, &x)) { 583 return (FALSE); 584 } 585 } 586 return (TRUE); 587} 588 589bool_t 590xdr_pri_free(XDRPROC_T_TYPE xdr_args, caddr_t args_ptr) 591{ 592 XDR xdr; 593 594 xdr.x_op = XDR_FREE; 595 return ((*xdr_args) (&xdr, (caddr_t *) args_ptr)); 596} 597