1/* $NetBSD: coda_vnops.c,v 1.103 2016/08/20 12:37:06 hannken Exp $ */
2
3/*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34/*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42/*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributors include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48#include <sys/cdefs.h>
49__KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.103 2016/08/20 12:37:06 hannken Exp $");
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/malloc.h>
54#include <sys/errno.h>
55#include <sys/acct.h>
56#include <sys/file.h>
57#include <sys/uio.h>
58#include <sys/namei.h>
59#include <sys/ioctl.h>
60#include <sys/mount.h>
61#include <sys/proc.h>
62#include <sys/select.h>
63#include <sys/vnode.h>
64#include <sys/kauth.h>
65
66#include <miscfs/genfs/genfs.h>
67#include <miscfs/specfs/specdev.h>
68
69#include <coda/coda.h>
70#include <coda/cnode.h>
71#include <coda/coda_vnops.h>
72#include <coda/coda_venus.h>
73#include <coda/coda_opstats.h>
74#include <coda/coda_subr.h>
75#include <coda/coda_namecache.h>
76#include <coda/coda_pioctl.h>
77
78/*
79 * These flags select various performance enhancements.
80 */
81int coda_attr_cache = 1; /* Set to cache attributes in the kernel */
82int coda_symlink_cache = 1; /* Set to cache symbolic link information */
83int coda_access_cache = 1; /* Set to handle some access checks directly */
84
85/* structure to keep track of vnode operation calls */
86
87struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
88
89#define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
90#define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
91#define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
92#define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
93
94/* Debugging: log lock and unlock attempts in coda_lock()/coda_unlock() */
95static int coda_lockdebug = 0;
96
97#define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
98
99/* Definition of the vnode operation vector */
100
101const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
102 { &vop_default_desc, coda_vop_error },
103 { &vop_lookup_desc, coda_lookup }, /* lookup */
104 { &vop_create_desc, coda_create }, /* create */
105 { &vop_mknod_desc, coda_vop_error }, /* mknod */
106 { &vop_open_desc, coda_open }, /* open */
107 { &vop_close_desc, coda_close }, /* close */
108 { &vop_access_desc, coda_access }, /* access */
109 { &vop_getattr_desc, coda_getattr }, /* getattr */
110 { &vop_setattr_desc, coda_setattr }, /* setattr */
111 { &vop_read_desc, coda_read }, /* read */
112 { &vop_write_desc, coda_write }, /* write */
113 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
114 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
115 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
116 { &vop_ioctl_desc, coda_ioctl }, /* ioctl */
117 { &vop_mmap_desc, genfs_mmap }, /* mmap */
118 { &vop_fsync_desc, coda_fsync }, /* fsync */
119 { &vop_remove_desc, coda_remove }, /* remove */
120 { &vop_link_desc, coda_link }, /* link */
121 { &vop_rename_desc, coda_rename }, /* rename */
122 { &vop_mkdir_desc, coda_mkdir }, /* mkdir */
123 { &vop_rmdir_desc, coda_rmdir }, /* rmdir */
124 { &vop_symlink_desc, coda_symlink }, /* symlink */
125 { &vop_readdir_desc, coda_readdir }, /* readdir */
126 { &vop_readlink_desc, coda_readlink }, /* readlink */
127 { &vop_abortop_desc, coda_abortop }, /* abortop */
128 { &vop_inactive_desc, coda_inactive }, /* inactive */
129 { &vop_reclaim_desc, coda_reclaim }, /* reclaim */
130 { &vop_lock_desc, coda_lock }, /* lock */
131 { &vop_unlock_desc, coda_unlock }, /* unlock */
132 { &vop_bmap_desc, coda_bmap }, /* bmap */
133 { &vop_strategy_desc, coda_strategy }, /* strategy */
134 { &vop_print_desc, coda_vop_error }, /* print */
135 { &vop_islocked_desc, coda_islocked }, /* islocked */
136 { &vop_pathconf_desc, coda_vop_error }, /* pathconf */
137 { &vop_advlock_desc, coda_vop_nop }, /* advlock */
138 { &vop_bwrite_desc, coda_vop_error }, /* bwrite */
139 { &vop_seek_desc, genfs_seek }, /* seek */
140 { &vop_poll_desc, genfs_poll }, /* poll */
141 { &vop_getpages_desc, coda_getpages }, /* getpages */
142 { &vop_putpages_desc, coda_putpages }, /* putpages */
143 { NULL, NULL }
144};
145
146static void coda_print_vattr(struct vattr *);
147
148int (**coda_vnodeop_p)(void *);
149const struct vnodeopv_desc coda_vnodeop_opv_desc =
150 { &coda_vnodeop_p, coda_vnodeop_entries };
151
152/* Definitions of NetBSD vnodeop interfaces */
153
154/*
155 * A generic error routine. Return EIO without looking at arguments.
156 */
157int
158coda_vop_error(void *anon) {
159 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
160
161 if (codadebug) {
162 myprintf(("%s: Vnode operation %s called (error).\n",
163 __func__, (*desc)->vdesc_name));
164 }
165
166 return EIO;
167}
168
169/* A generic do-nothing. */
170int
171coda_vop_nop(void *anon) {
172 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
173
174 if (codadebug) {
175 myprintf(("Vnode operation %s called, but unsupported\n",
176 (*desc)->vdesc_name));
177 }
178 return (0);
179}
180
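/*
 * Initialize the per-operation statistics counters used by the
 * MARK_ENTRY/MARK_INT_* macros above.
 */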
181int
182coda_vnodeopstats_init(void)
183{
184 int i;
185
186 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
187 coda_vnodeopstats[i].opcode = i;
188 coda_vnodeopstats[i].entries = 0;
189 coda_vnodeopstats[i].sat_intrn = 0;
190 coda_vnodeopstats[i].unsat_intrn = 0;
191 coda_vnodeopstats[i].gen_intrn = 0;
192 }
193
194 return 0;
195}
196
197/*
198 * XXX The entire relationship between VOP_OPEN and having a container
199 * file (via venus_open) needs to be reexamined. In particular, it's
200 * valid to open/mmap/close and then reference. Instead of doing
201 * VOP_OPEN when getpages needs a container, we should do the
202 * venus_open part, and record that the vnode has opened the container
203 * for getpages, and do the matching logical close on coda_inactive.
204 * Further, coda_rdwr needs a container file, and sometimes needs to
205 * do the equivalent of open (core dumps).
206 */
207/*
208 * coda_open calls Venus to return the device and inode of the
209 * container file, and then obtains a vnode for that file. The
210 * container vnode is stored in the coda vnode, and a reference is
211 * added for each open file.
212 */
213int
214coda_open(void *v)
215{
216 /*
217 * NetBSD can pass the O_EXCL flag in mode, even though the check
218 * has already happened. Venus defensively assumes that if open
219 * is passed O_EXCL, it must be a bug. We strip the flag here.
220 */
221/* true args */
222 struct vop_open_args *ap = v;
223 vnode_t *vp = ap->a_vp;
224 struct cnode *cp = VTOC(vp);
225 int flag = ap->a_mode & (~O_EXCL);
226 kauth_cred_t cred = ap->a_cred;
227/* locals */
228 int error;
229 dev_t dev; /* container file device, inode, vnode */
230 ino_t inode;
231 vnode_t *container_vp;
232
233 MARK_ENTRY(CODA_OPEN_STATS);
234
235 KASSERT(VOP_ISLOCKED(vp));
236 /* Check for open of control file. */
237 if (IS_CTL_VP(vp)) {
238 /* if (WRITABLE(flag)) */
239 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
240 MARK_INT_FAIL(CODA_OPEN_STATS);
241 return(EACCES);
242 }
243 MARK_INT_SAT(CODA_OPEN_STATS);
244 return(0);
245 }
246
247 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
248 if (error)
249 return (error);
251 CODADEBUG(CODA_OPEN, myprintf((
252 "%s: dev 0x%llx inode %llu result %d\n", __func__,
253 (unsigned long long)dev, (unsigned long long)inode, error));)
255
256 /*
257 * Obtain locked and referenced container vnode from container
258 * device/inode.
259 */
260 error = coda_grab_vnode(vp, dev, inode, &container_vp);
261 if (error)
262 return (error);
263
264 /* Save the vnode pointer for the container file. */
265 if (cp->c_ovp == NULL) {
266 cp->c_ovp = container_vp;
267 } else {
268 if (cp->c_ovp != container_vp)
269 /*
270 * Perhaps venus returned a different container, or
271 * something else went wrong.
272 */
273 panic("%s: cp->c_ovp != container_vp", __func__);
274 }
275 cp->c_ocount++;
276
277 /* Flush the attribute cache if writing the file. */
278 if (flag & FWRITE) {
279 cp->c_owrite++;
280 cp->c_flags &= ~C_VATTR;
281 }
282
283 /*
284 * Save the <device, inode> pair for the container file to speed
285 * up subsequent reads while closed (mmap, program execution).
286 * This is perhaps safe because venus will invalidate the node
287 * before changing the container file mapping.
288 */
289 cp->c_device = dev;
290 cp->c_inode = inode;
291
292 /* Open the container file. */
293 error = VOP_OPEN(container_vp, flag, cred);
294 /*
295 * Drop the lock on the container, after we have done VOP_OPEN
296 * (which requires a locked vnode).
297 */
298 VOP_UNLOCK(container_vp);
299 return(error);
300}
301
302/*
303 * Close the cache file used for I/O and notify Venus.
304 */
305int
306coda_close(void *v)
307{
308/* true args */
309 struct vop_close_args *ap = v;
310 vnode_t *vp = ap->a_vp;
311 struct cnode *cp = VTOC(vp);
312 int flag = ap->a_fflag;
313 kauth_cred_t cred = ap->a_cred;
314/* locals */
315 int error;
316
317 MARK_ENTRY(CODA_CLOSE_STATS);
318
319 /* Check for close of control file. */
320 if (IS_CTL_VP(vp)) {
321 MARK_INT_SAT(CODA_CLOSE_STATS);
322 return(0);
323 }
324
325 /*
326 * XXX The IS_UNMOUNTING part of this is very suspect.
327 */
328 if (IS_UNMOUNTING(cp)) {
329 if (cp->c_ovp) {
330#ifdef CODA_VERBOSE
331 printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
332 __func__, vp->v_usecount, cp->c_ovp, vp, cp);
333#endif
334#ifdef hmm
335 vgone(cp->c_ovp);
336#else
337 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
338 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
339 vput(cp->c_ovp);
340#endif
341 } else {
342#ifdef CODA_VERBOSE
343 printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
344#endif
345 }
346 return ENODEV;
347 }
348
349 /* Lock the container node, and VOP_CLOSE it. */
350 vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
351 VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
352 /*
353 * Drop the lock we just obtained, and vrele the container vnode.
354 * Decrement reference counts, and clear container vnode pointer on
355 * last close.
356 */
357 vput(cp->c_ovp);
358 if (flag & FWRITE)
359 --cp->c_owrite;
360 if (--cp->c_ocount == 0)
361 cp->c_ovp = NULL;
362
363 error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
364
365 CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
366 return(error);
367}
368
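/*
 * Read and write simply redirect to coda_rdwr(), which does the I/O
 * on the container file.
 */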
369int
370coda_read(void *v)
371{
372 struct vop_read_args *ap = v;
373
374 ENTRY;
375 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
376 ap->a_ioflag, ap->a_cred, curlwp));
377}
378
379int
380coda_write(void *v)
381{
382 struct vop_write_args *ap = v;
383
384 ENTRY;
385 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
386 ap->a_ioflag, ap->a_cred, curlwp));
387}
388
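/*
 * Common read/write path: do the I/O on the container file's vnode,
 * grabbing it by <device, inode> or opening it internally if no
 * container is currently open, and invalidate the cached attributes
 * after a write.
 */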
389int
390coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
391 kauth_cred_t cred, struct lwp *l)
392{
393/* upcall decl */
394 /* NOTE: container file operation!!! */
395/* locals */
396 struct cnode *cp = VTOC(vp);
397 vnode_t *cfvp = cp->c_ovp;
398 struct proc *p = l->l_proc;
399 int opened_internally = 0;
400 int error = 0;
401
402 MARK_ENTRY(CODA_RDWR_STATS);
403
404 CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
405 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
406 (long long) uiop->uio_offset)); )
407
408 /* Check for rdwr of control object. */
409 if (IS_CTL_VP(vp)) {
410 MARK_INT_FAIL(CODA_RDWR_STATS);
411 return(EINVAL);
412 }
413
414 /* Redirect the request to UFS. */
415
416 /*
417 * If file is not already open this must be a page
418 * {read,write} request. Iget the cache file's inode
419 * pointer if we still have its <device, inode> pair.
420 * Otherwise, we must do an internal open to derive the
421 * pair.
422 * XXX Integrate this into a coherent strategy for container
423 * file acquisition.
424 */
425 if (cfvp == NULL) {
426 /*
427 * If we're dumping core, do the internal open. Otherwise
428 * venus won't have the correct size of the core when
429 * it's completely written.
430 */
431 if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
432#ifdef CODA_VERBOSE
433 printf("%s: grabbing container vnode, losing reference\n",
434 __func__);
435#endif
436 /* Get locked and refed vnode. */
437 error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
438 if (error) {
439 MARK_INT_FAIL(CODA_RDWR_STATS);
440 return(error);
441 }
442 /*
443 * Drop lock.
444 * XXX Where is the reference released?
445 */
446 VOP_UNLOCK(cfvp);
447 }
448 else {
449#ifdef CODA_VERBOSE
450 printf("%s: internal VOP_OPEN\n", __func__);
451#endif
452 opened_internally = 1;
453 MARK_INT_GEN(CODA_OPEN_STATS);
454 error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
455#ifdef CODA_VERBOSE
456 printf("%s: Internally Opening %p\n", __func__, vp);
457#endif
458 if (error) {
459 MARK_INT_FAIL(CODA_RDWR_STATS);
460 return(error);
461 }
462 cfvp = cp->c_ovp;
463 }
464 }
465
466 /* Have UFS handle the call. */
467 CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
468 coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )
469
470 if (rw == UIO_READ) {
471 error = VOP_READ(cfvp, uiop, ioflag, cred);
472 } else {
473 error = VOP_WRITE(cfvp, uiop, ioflag, cred);
474 }
475
476 if (error)
477 MARK_INT_FAIL(CODA_RDWR_STATS);
478 else
479 MARK_INT_SAT(CODA_RDWR_STATS);
480
481 /* Do an internal close if necessary. */
482 if (opened_internally) {
483 MARK_INT_GEN(CODA_CLOSE_STATS);
484 (void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
485 }
486
487 /* Invalidate cached attributes if writing. */
488 if (rw == UIO_WRITE)
489 cp->c_flags &= ~C_VATTR;
490 return(error);
491}
492
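/*
 * The pioctl interface: ioctl is only supported on the control
 * object. The request names a target path, which is looked up and
 * handed to venus along with the command and data.
 */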
493int
494coda_ioctl(void *v)
495{
496/* true args */
497 struct vop_ioctl_args *ap = v;
498 vnode_t *vp = ap->a_vp;
499 int com = ap->a_command;
500 void *data = ap->a_data;
501 int flag = ap->a_fflag;
502 kauth_cred_t cred = ap->a_cred;
503/* locals */
504 int error;
505 vnode_t *tvp;
506 struct PioctlData *iap = (struct PioctlData *)data;
507 namei_simple_flags_t sflags;
508
509 MARK_ENTRY(CODA_IOCTL_STATS);
510
511 CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
512
513 /* Don't check for operation on a dying object; for the ctlvp it
514 shouldn't matter */
515
516 /* Must be control object to succeed. */
517 if (!IS_CTL_VP(vp)) {
518 MARK_INT_FAIL(CODA_IOCTL_STATS);
519 CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
520 return (EOPNOTSUPP);
521 }
522 /* Look up the pathname. */
523
524 /* Should we use the name cache here? It would get it from
525 lookupname sooner or later anyway, right? */
526
527 sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
528 error = namei_simple_user(iap->path, sflags, &tvp);
529
530 if (error) {
531 MARK_INT_FAIL(CODA_IOCTL_STATS);
532 CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
533 __func__, error));)
534 return(error);
535 }
536
537 /*
538 * Make sure this is a coda style cnode, but it may be a
539 * different vfsp
540 */
541 /* XXX: this totally violates the comment about vtagtype in vnode.h */
542 if (tvp->v_tag != VT_CODA) {
543 vrele(tvp);
544 MARK_INT_FAIL(CODA_IOCTL_STATS);
545 CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
546 __func__, iap->path));)
547 return(EINVAL);
548 }
549
550 if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
551 vrele(tvp);
552 return(EINVAL);
553 }
554 error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
555 cred, curlwp);
556
557 if (error)
558 MARK_INT_FAIL(CODA_IOCTL_STATS);
559 else
560 CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
561
562 vrele(tvp);
563 return(error);
564}
565
566/*
567 * To reduce the cost of a user-level venus, we cache attributes in
568 * the kernel. Each cnode has storage allocated for an attribute. If
569 * c_vattr is valid, return a reference to it. Otherwise, get the
570 * attributes from venus and store them in the cnode. There is some
571 * question whether this method is a security leak. But I think that in
572 * order to make this call, the user must have done a lookup and
573 * opened the file, and therefore should already have access.
574 */
575int
576coda_getattr(void *v)
577{
578/* true args */
579 struct vop_getattr_args *ap = v;
580 vnode_t *vp = ap->a_vp;
581 struct cnode *cp = VTOC(vp);
582 struct vattr *vap = ap->a_vap;
583 kauth_cred_t cred = ap->a_cred;
584/* locals */
585 int error;
586
587 MARK_ENTRY(CODA_GETATTR_STATS);
588
589 /* Check for getattr of control object. */
590 if (IS_CTL_VP(vp)) {
591 MARK_INT_FAIL(CODA_GETATTR_STATS);
592 return(ENOENT);
593 }
594
595 /* Check to see if the attributes have already been cached */
596 if (VALID_VATTR(cp)) {
597 CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
598 __func__, coda_f2s(&cp->c_fid)));})
599 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
600 coda_print_vattr(&cp->c_vattr); )
601
602 *vap = cp->c_vattr;
603 MARK_INT_SAT(CODA_GETATTR_STATS);
604 return(0);
605 }
606
607 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
608
609 if (!error) {
610 CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
611 __func__, coda_f2s(&cp->c_fid), error)); )
612
613 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
614 coda_print_vattr(vap); )
615
616 /* If not open for write, store attributes in cnode */
617 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
618 cp->c_vattr = *vap;
619 cp->c_flags |= C_VATTR;
620 }
621
622 }
623 return(error);
624}
625
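/*
 * Pass an attribute change to venus, and invalidate the cached
 * attributes on success.
 */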
626int
627coda_setattr(void *v)
628{
629/* true args */
630 struct vop_setattr_args *ap = v;
631 vnode_t *vp = ap->a_vp;
632 struct cnode *cp = VTOC(vp);
633 struct vattr *vap = ap->a_vap;
634 kauth_cred_t cred = ap->a_cred;
635/* locals */
636 int error;
637
638 MARK_ENTRY(CODA_SETATTR_STATS);
639
640 /* Check for setattr of control object. */
641 if (IS_CTL_VP(vp)) {
642 MARK_INT_FAIL(CODA_SETATTR_STATS);
643 return(ENOENT);
644 }
645
646 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
647 coda_print_vattr(vap);
648 }
649 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
650
651 if (!error)
652 cp->c_flags &= ~C_VATTR;
653
654 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
655 return(error);
656}
657
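/*
 * Check access. If coda_access_cache is set, lookup (VEXEC) access
 * to a directory may be satisfied from the coda namecache; otherwise
 * the decision is left to venus.
 */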
658int
659coda_access(void *v)
660{
661/* true args */
662 struct vop_access_args *ap = v;
663 vnode_t *vp = ap->a_vp;
664 struct cnode *cp = VTOC(vp);
665 int mode = ap->a_mode;
666 kauth_cred_t cred = ap->a_cred;
667/* locals */
668 int error;
669
670 MARK_ENTRY(CODA_ACCESS_STATS);
671
672 /* Check for access of control object. Only read access is
673 allowed on it. */
674 if (IS_CTL_VP(vp)) {
675 /* bogus hack - all will be marked as successes */
676 MARK_INT_SAT(CODA_ACCESS_STATS);
677 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
678 ? 0 : EACCES);
679 }
680
681 /*
682 * If the file is a directory, we are checking exec (i.e. lookup)
683 * access, and the directory is in the coda namecache for this cred,
684 * then the user must already have lookup access to it.
685 */
686 if (coda_access_cache) {
687 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
688 if (coda_nc_lookup(cp, ".", 1, cred)) {
689 MARK_INT_SAT(CODA_ACCESS_STATS);
690 return(0); /* it was in the cache */
691 }
692 }
693 }
694
695 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
696
697 return(error);
698}
699
700/*
701 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
702 * done. If a buffer has been saved in anticipation of a coda_create or
703 * a coda_remove, delete it.
704 */
705/* ARGSUSED */
706int
707coda_abortop(void *v)
708{
709/* true args */
710 struct vop_abortop_args /* {
711 vnode_t *a_dvp;
712 struct componentname *a_cnp;
713 } */ *ap = v;
714
715 (void)ap;
716/* upcall decl */
717/* locals */
718
719 return (0);
720}
721
722int
723coda_readlink(void *v)
724{
725/* true args */
726 struct vop_readlink_args *ap = v;
727 vnode_t *vp = ap->a_vp;
728 struct cnode *cp = VTOC(vp);
729 struct uio *uiop = ap->a_uio;
730 kauth_cred_t cred = ap->a_cred;
731/* locals */
732 struct lwp *l = curlwp;
733 int error;
734 char *str;
735 int len;
736
737 MARK_ENTRY(CODA_READLINK_STATS);
738
739 /* Check for readlink of control object. */
740 if (IS_CTL_VP(vp)) {
741 MARK_INT_FAIL(CODA_READLINK_STATS);
742 return(ENOENT);
743 }
744
745 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
746 uiop->uio_rw = UIO_READ;
747 error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
748 if (error)
749 MARK_INT_FAIL(CODA_READLINK_STATS);
750 else
751 MARK_INT_SAT(CODA_READLINK_STATS);
752 return(error);
753 }
754
755 error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
756
757 if (!error) {
758 uiop->uio_rw = UIO_READ;
759 error = uiomove(str, len, uiop);
760
761 if (coda_symlink_cache) {
762 cp->c_symlink = str;
763 cp->c_symlen = len;
764 cp->c_flags |= C_SYMLINK;
765 } else
766 CODA_FREE(str, len);
767 }
768
769 CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
770 return(error);
771}
772
773int
774coda_fsync(void *v)
775{
776/* true args */
777 struct vop_fsync_args *ap = v;
778 vnode_t *vp = ap->a_vp;
779 struct cnode *cp = VTOC(vp);
780 kauth_cred_t cred = ap->a_cred;
781/* locals */
782 vnode_t *convp = cp->c_ovp;
783 int error;
784
785 MARK_ENTRY(CODA_FSYNC_STATS);
786
787 /* Check for fsync on an unmounting object */
788 /* The NetBSD kernel, in its infinite wisdom, can try to fsync
789 * after an unmount has been initiated. This is a Bad Thing,
790 * which we have to avoid. Not a legitimate failure for stats.
791 */
792 if (IS_UNMOUNTING(cp)) {
793 return(ENODEV);
794 }
795
796 /* Check for fsync of control object or uninitialized cnode. */
797 if (IS_CTL_VP(vp) || vp->v_type == VNON) {
798 MARK_INT_SAT(CODA_FSYNC_STATS);
799 return(0);
800 }
801
802 if (convp)
803 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
804
805 /*
806 * We can expect fsync on any vnode at all if venus is purging it.
807 * Venus can't very well answer the fsync request, now can it?
808 * Hopefully, it won't have to, because hopefully, venus preserves
809 * the (possibly untrue) invariant that it never purges an open
810 * vnode. Hopefully.
811 */
812 if (cp->c_flags & C_PURGING) {
813 return(0);
814 }
815
816 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
817
818 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
819 return(error);
820}
821
822/*
823 * vp is locked on entry, and we must unlock it.
824 * XXX This routine is suspect and probably needs rewriting.
825 */
826int
827coda_inactive(void *v)
828{
829/* true args */
830 struct vop_inactive_args *ap = v;
831 vnode_t *vp = ap->a_vp;
832 struct cnode *cp = VTOC(vp);
833 kauth_cred_t cred __unused = NULL;
834
835 /* We don't need to send inactive to venus - DCS */
836 MARK_ENTRY(CODA_INACTIVE_STATS);
837
838 if (IS_CTL_VP(vp)) {
839 MARK_INT_SAT(CODA_INACTIVE_STATS);
840 VOP_UNLOCK(vp);
841 return 0;
842 }
843
844 CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
845 coda_f2s(&cp->c_fid), vp->v_mount));)
846
847 if (vp->v_mount->mnt_data == NULL) {
848 myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
849 panic("badness in coda_inactive");
850 }
851
852#ifdef CODA_VERBOSE
853 /* Sanity checks that perhaps should be panic. */
854 if (vp->v_usecount > 1)
855 printf("%s: %p usecount %d\n", __func__, vp, vp->v_usecount);
856 if (cp->c_ovp != NULL)
857 printf("%s: %p ovp != NULL\n", __func__, vp);
858#endif
859 /* XXX Do we need to VOP_CLOSE container vnodes? */
860 VOP_UNLOCK(vp);
861 if (!IS_UNMOUNTING(cp))
862 *ap->a_recycle = true;
863
864 MARK_INT_SAT(CODA_INACTIVE_STATS);
865 return(0);
866}
867
868/*
869 * Coda does not use the normal namecache, but a private version.
870 * Consider how to use the standard facility instead.
871 */
872int
873coda_lookup(void *v)
874{
875/* true args */
876 struct vop_lookup_v2_args *ap = v;
877 /* (locked) vnode of dir in which to do lookup */
878 vnode_t *dvp = ap->a_dvp;
879 struct cnode *dcp = VTOC(dvp);
880 /* output variable for result */
881 vnode_t **vpp = ap->a_vpp;
882 /* name to lookup */
883 struct componentname *cnp = ap->a_cnp;
884 kauth_cred_t cred = cnp->cn_cred;
885 struct lwp *l = curlwp;
886/* locals */
887 struct cnode *cp;
888 const char *nm = cnp->cn_nameptr;
889 int len = cnp->cn_namelen;
890 CodaFid VFid;
891 int vtype;
892 int error = 0;
893
894 MARK_ENTRY(CODA_LOOKUP_STATS);
895
896 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
897 nm, coda_f2s(&dcp->c_fid)));)
898
899 /*
900 * XXX componentname flags in MODMASK are not handled at all
901 */
902
903 /*
904 * The overall strategy is to switch on the lookup type and get a
905 * result vnode that is vref'd but not locked.
906 */
907
908 /* Check for lookup of control object. */
909 if (IS_CTL_NAME(dvp, nm, len)) {
910 *vpp = coda_ctlvp;
911 vref(*vpp);
912 MARK_INT_SAT(CODA_LOOKUP_STATS);
913 goto exit;
914 }
915
916 /* Avoid trying to hand venus an unreasonably long name. */
917 if (len+1 > CODA_MAXNAMLEN) {
918 MARK_INT_FAIL(CODA_LOOKUP_STATS);
919 CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
920 __func__, coda_f2s(&dcp->c_fid), nm));)
921 *vpp = (vnode_t *)0;
922 error = EINVAL;
923 goto exit;
924 }
925
926 /*
927 * Try to resolve the lookup in the minicache. If that fails, ask
928 * venus to do the lookup. XXX The interaction between vnode
929 * locking and any locking that coda does is not clear.
930 */
931 cp = coda_nc_lookup(dcp, nm, len, cred);
932 if (cp) {
933 *vpp = CTOV(cp);
934 vref(*vpp);
935 CODADEBUG(CODA_LOOKUP,
936 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
937 } else {
938 /* The name wasn't cached, so ask Venus. */
939 error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
940 &vtype);
941
942 if (error) {
943 MARK_INT_FAIL(CODA_LOOKUP_STATS);
944 CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
945 __func__, coda_f2s(&dcp->c_fid), nm, error));)
946 *vpp = (vnode_t *)0;
947 } else {
948 MARK_INT_SAT(CODA_LOOKUP_STATS);
949 CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
950 __func__, coda_f2s(&VFid), vtype, error)); )
951
952 cp = make_coda_node(&VFid, dvp->v_mount, vtype);
953 *vpp = CTOV(cp);
954 /* vpp is now vrefed. */
955
956 /*
957 * Unless this vnode is marked CODA_NOCACHE, enter it into
958 * the coda name cache to avoid a future venus round-trip.
959 * XXX Interaction with componentname NOCACHE is unclear.
960 */
961 if (!(vtype & CODA_NOCACHE))
962 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
963 }
964 }
965
966 exit:
967 /*
968 * If we are creating, and this was the last name to be looked up,
969 * and the error was ENOENT, then make the leaf NULL and return
970 * success.
971 * XXX Check against new lookup rules.
972 */
973 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
974 && (cnp->cn_flags & ISLASTCN)
975 && (error == ENOENT))
976 {
977 error = EJUSTRETURN;
978 *ap->a_vpp = NULL;
979 }
980
981 return(error);
982}
983
984/*ARGSUSED*/
985int
986coda_create(void *v)
987{
988/* true args */
989 struct vop_create_v3_args *ap = v;
990 vnode_t *dvp = ap->a_dvp;
991 struct cnode *dcp = VTOC(dvp);
992 struct vattr *va = ap->a_vap;
993 int exclusive = 1;
994 int mode = ap->a_vap->va_mode;
995 vnode_t **vpp = ap->a_vpp;
996 struct componentname *cnp = ap->a_cnp;
997 kauth_cred_t cred = cnp->cn_cred;
998 struct lwp *l = curlwp;
999/* locals */
1000 int error;
1001 struct cnode *cp;
1002 const char *nm = cnp->cn_nameptr;
1003 int len = cnp->cn_namelen;
1004 CodaFid VFid;
1005 struct vattr attr;
1006
1007 MARK_ENTRY(CODA_CREATE_STATS);
1008
1009 /* All creates are exclusive XXX */
1010 /* I'm assuming the 'mode' argument is the file mode bits XXX */
1011
1012 /* Check for create of control object. */
1013 if (IS_CTL_NAME(dvp, nm, len)) {
1014 *vpp = (vnode_t *)0;
1015 MARK_INT_FAIL(CODA_CREATE_STATS);
1016 return(EACCES);
1017 }
1018
1019 error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1020
1021 if (!error) {
1022
1023 /*
1024 * XXX Violation of venus/kernel invariants is a difficult case,
1025 * but venus should not be able to cause a panic.
1026 */
1027 /* If this is an exclusive create, panic if the file already exists. */
1028 /* Venus should have detected the file and reported EEXIST. */
1029
1030 if ((exclusive == 1) &&
1031 (coda_find(&VFid) != NULL))
1032 panic("cnode existed for newly created file!");
1033
1034 cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1035 *vpp = CTOV(cp);
1036
1037 /* XXX vnodeops doesn't say this argument can be changed. */
1038 /* Update va to reflect the new attributes. */
1039 (*va) = attr;
1040
1041 /* Update the attribute cache and mark it as valid */
1042 if (coda_attr_cache) {
1043 VTOC(*vpp)->c_vattr = attr;
1044 VTOC(*vpp)->c_flags |= C_VATTR;
1045 }
1046
1047 /* Invalidate parent's attr cache (modification time has changed). */
1048 VTOC(dvp)->c_flags &= ~C_VATTR;
1049
1050 /* enter the new vnode in the Name Cache */
1051 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1052
1053 CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1054 coda_f2s(&VFid), error)); )
1055 } else {
1056 *vpp = (vnode_t *)0;
1057 CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1058 error));)
1059 }
1060
1061 if (!error) {
1062#ifdef CODA_VERBOSE
1063 if ((cnp->cn_flags & LOCKLEAF) == 0)
1064 /* This should not happen; flags are for lookup only. */
1065 printf("%s: LOCKLEAF not set!\n", __func__);
1066#endif
1067 }
1068
1069 return(error);
1070}
1071
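/*
 * Remove a file. Adjust the cached link count of a hard-linked
 * target, purge the name from the coda namecache, invalidate the
 * parent's attribute cache, and then ask venus to do the remove.
 */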
1072int
1073coda_remove(void *v)
1074{
1075/* true args */
1076 struct vop_remove_args *ap = v;
1077 vnode_t *dvp = ap->a_dvp;
1078 struct cnode *cp = VTOC(dvp);
1079 vnode_t *vp = ap->a_vp;
1080 struct componentname *cnp = ap->a_cnp;
1081 kauth_cred_t cred = cnp->cn_cred;
1082 struct lwp *l = curlwp;
1083/* locals */
1084 int error;
1085 const char *nm = cnp->cn_nameptr;
1086 int len = cnp->cn_namelen;
1087 struct cnode *tp;
1088
1089 MARK_ENTRY(CODA_REMOVE_STATS);
1090
1091 CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1092 nm, coda_f2s(&cp->c_fid)));)
1093
1094 /* Remove the file's entry from the CODA Name Cache */
1095 /* We're being conservative here, it might be that this person
1096 * doesn't really have sufficient access to delete the file
1097 * but we feel zapping the entry won't really hurt anyone -- dcs
1098 */
1099 /* I'm gonna go out on a limb here. If a file and a hardlink to it
1100 * exist, and one is removed, the link count on the other will be
1101 * off by 1. We could either invalidate the attrs if cached, or
1102 * fix them. I'll try to fix them. DCS 11/8/94
1103 */
1104 tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1105 if (tp) {
1106 if (VALID_VATTR(tp)) { /* If attrs are cached */
1107 if (tp->c_vattr.va_nlink > 1) { /* If it's a hard link */
1108 tp->c_vattr.va_nlink--;
1109 }
1110 }
1111
1112 coda_nc_zapfile(VTOC(dvp), nm, len);
1113 /* No need to flush it if it doesn't exist! */
1114 }
1115 /* Invalidate the parent's attr cache, the modification time has changed */
1116 VTOC(dvp)->c_flags &= ~C_VATTR;
1117
1118 /* Check for remove of control object. */
1119 if (IS_CTL_NAME(dvp, nm, len)) {
1120 MARK_INT_FAIL(CODA_REMOVE_STATS);
1121 return(ENOENT);
1122 }
1123
1124 error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1125
1126 CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1127
1128 /*
1129 * Unlock parent and child (avoiding double if ".").
1130 */
1131 if (dvp == vp) {
1132 vrele(vp);
1133 } else {
1134 vput(vp);
1135 }
1136 vput(dvp);
1137
1138 return(error);
1139}
1140
1141/*
1142 * dvp is the directory where the link is to go, and is locked.
1143 * vp is the object to be linked to, and is unlocked.
1144 * At exit, we must unlock dvp, and vput dvp.
1145 */
1146int
1147coda_link(void *v)
1148{
1149/* true args */
1150 struct vop_link_v2_args *ap = v;
1151 vnode_t *vp = ap->a_vp;
1152 struct cnode *cp = VTOC(vp);
1153 vnode_t *dvp = ap->a_dvp;
1154 struct cnode *dcp = VTOC(dvp);
1155 struct componentname *cnp = ap->a_cnp;
1156 kauth_cred_t cred = cnp->cn_cred;
1157 struct lwp *l = curlwp;
1158/* locals */
1159 int error;
1160 const char *nm = cnp->cn_nameptr;
1161 int len = cnp->cn_namelen;
1162
1163 MARK_ENTRY(CODA_LINK_STATS);
1164
1171 if (codadebug & CODADBGMSK(CODA_LINK)) {
1172 myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1173 myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1174
1175 }
1176
1177 /* Check for link to/from control object. */
1178 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1179 MARK_INT_FAIL(CODA_LINK_STATS);
1180 return(EACCES);
1181 }
1182
1183 /* If linking . to a name, error out earlier. */
1184 if (vp == dvp) {
1185#ifdef CODA_VERBOSE
1186 printf("%s: vp == dvp\n", __func__);
1187#endif
1188 error = EISDIR;
1189 goto exit;
1190 }
1191
1192 /* XXX Why does venus_link need the vnode to be locked?*/
1193 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1194#ifdef CODA_VERBOSE
1195 printf("%s: couldn't lock vnode %p\n", __func__, vp);
1196#endif
1197 error = EFAULT; /* XXX better value */
1198 goto exit;
1199 }
1200 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1201 VOP_UNLOCK(vp);
1202
1203 /* Invalidate parent's attr cache (the modification time has changed). */
1204 VTOC(dvp)->c_flags &= ~C_VATTR;
1205 /* Invalidate child's attr cache (XXX why). */
1206 VTOC(vp)->c_flags &= ~C_VATTR;
1207
1208 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1209
1210exit:
1211 return(error);
1212}
1213
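/*
 * Rename. Purge the affected entries from the coda namecache,
 * invalidate both parents' attribute caches, and pass the operation
 * to venus. The passed-in vnodes are released before returning.
 */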
1214int
1215coda_rename(void *v)
1216{
1217/* true args */
1218 struct vop_rename_args *ap = v;
1219 vnode_t *odvp = ap->a_fdvp;
1220 struct cnode *odcp = VTOC(odvp);
1221 struct componentname *fcnp = ap->a_fcnp;
1222 vnode_t *ndvp = ap->a_tdvp;
1223 struct cnode *ndcp = VTOC(ndvp);
1224 struct componentname *tcnp = ap->a_tcnp;
1225 kauth_cred_t cred = fcnp->cn_cred;
1226 struct lwp *l = curlwp;
1227/* true args */
1228 int error;
1229 const char *fnm = fcnp->cn_nameptr;
1230 int flen = fcnp->cn_namelen;
1231 const char *tnm = tcnp->cn_nameptr;
1232 int tlen = tcnp->cn_namelen;
1233
1234 MARK_ENTRY(CODA_RENAME_STATS);
1235
1236 /* Hmmm. The vnodes are already looked up. Perhaps they are locked?
1237 This could be Bad. XXX */
1238#ifdef OLD_DIAGNOSTIC
1239 if ((fcnp->cn_cred != tcnp->cn_cred)
1240 || (fcnp->cn_lwp != tcnp->cn_lwp))
1241 {
1242 panic("%s: component names don't agree", __func__);
1243 }
1244#endif
1245
1246 /* Check for rename involving control object. */
1247 if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1248 MARK_INT_FAIL(CODA_RENAME_STATS);
1249 return(EACCES);
1250 }
1251
1252 /* Problem with moving directories -- need to flush entry for .. */
1253 if (odvp != ndvp) {
1254 struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1255 if (ovcp) {
1256 vnode_t *ovp = CTOV(ovcp);
1257 if ((ovp) &&
1258 (ovp->v_type == VDIR)) /* If it's a directory */
1259 coda_nc_zapfile(VTOC(ovp),"..", 2);
1260 }
1261 }
1262
1263 /* Remove the entries for both source and target files */
1264 coda_nc_zapfile(VTOC(odvp), fnm, flen);
1265 coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1266
1267 /* Invalidate the parent's attr cache, the modification time has changed */
1268 VTOC(odvp)->c_flags &= ~C_VATTR;
1269 VTOC(ndvp)->c_flags &= ~C_VATTR;
1270
1271 if (flen+1 > CODA_MAXNAMLEN) {
1272 MARK_INT_FAIL(CODA_RENAME_STATS);
1273 error = EINVAL;
1274 goto exit;
1275 }
1276
1277 if (tlen+1 > CODA_MAXNAMLEN) {
1278 MARK_INT_FAIL(CODA_RENAME_STATS);
1279 error = EINVAL;
1280 goto exit;
1281 }
1282
1283 error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1284
1285 exit:
1286 CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1287 /* XXX - do we need to call cache purge on the moved vnode? */
1288 cache_purge(ap->a_fvp);
1289
1290 /* It seems to be incumbent on us to drop locks on all four vnodes */
1291 /* From-vnodes are not locked, only ref'd. To-vnodes are locked. */
1292
1293 vrele(ap->a_fvp);
1294 vrele(odvp);
1295
1296 if (ap->a_tvp) {
1297 if (ap->a_tvp == ndvp) {
1298 vrele(ap->a_tvp);
1299 } else {
1300 vput(ap->a_tvp);
1301 }
1302 }
1303
1304 vput(ndvp);
1305 return(error);
1306}
1307
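/*
 * Make a directory via venus, enter the new node (and its "." and
 * "..") in the coda namecache, and prime its attribute cache when
 * attribute caching is enabled.
 */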
1308int
1309coda_mkdir(void *v)
1310{
1311/* true args */
1312 struct vop_mkdir_v3_args *ap = v;
1313 vnode_t *dvp = ap->a_dvp;
1314 struct cnode *dcp = VTOC(dvp);
1315 struct componentname *cnp = ap->a_cnp;
1316 struct vattr *va = ap->a_vap;
1317 vnode_t **vpp = ap->a_vpp;
1318 kauth_cred_t cred = cnp->cn_cred;
1319 struct lwp *l = curlwp;
1320/* locals */
1321 int error;
1322 const char *nm = cnp->cn_nameptr;
1323 int len = cnp->cn_namelen;
1324 struct cnode *cp;
1325 CodaFid VFid;
1326 struct vattr ova;
1327
1328 MARK_ENTRY(CODA_MKDIR_STATS);
1329
1330 /* Check for mkdir of control object. */
1331 if (IS_CTL_NAME(dvp, nm, len)) {
1332 *vpp = (vnode_t *)0;
1333 MARK_INT_FAIL(CODA_MKDIR_STATS);
1334 return(EACCES);
1335 }
1336
1337 if (len+1 > CODA_MAXNAMLEN) {
1338 *vpp = (vnode_t *)0;
1339 MARK_INT_FAIL(CODA_MKDIR_STATS);
1340 return(EACCES);
1341 }
1342
1343 error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1344
1345 if (!error) {
1346 if (coda_find(&VFid) != NULL)
1347 panic("cnode existed for newly created directory!");
1348
1349
1350 cp = make_coda_node(&VFid, dvp->v_mount, va->va_type);
1351 *vpp = CTOV(cp);
1352
1353 /* enter the new vnode in the Name Cache */
1354 coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1355
1356 /* as a side effect, enter "." and ".." for the directory */
1357 coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1358 coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1359
1360 if (coda_attr_cache) {
1361 VTOC(*vpp)->c_vattr = ova; /* update the attr cache */
1362 VTOC(*vpp)->c_flags |= C_VATTR; /* Valid attributes in cnode */
1363 }
1364
1365 /* Invalidate the parent's attr cache, the modification time has changed */
1366 VTOC(dvp)->c_flags &= ~C_VATTR;
1367
1368 CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1369 coda_f2s(&VFid), error)); )
1370 } else {
1371 *vpp = (vnode_t *)0;
1372 CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1373 }
1374
1375 return(error);
1376}
1377
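/*
 * Remove a directory. Purge the child and its namecache entries,
 * invalidate the parent's attribute cache, and ask venus to do the
 * removal. Both vnodes are released on exit.
 */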
1378int
1379coda_rmdir(void *v)
1380{
1381/* true args */
1382 struct vop_rmdir_args *ap = v;
1383 vnode_t *dvp = ap->a_dvp;
1384 struct cnode *dcp = VTOC(dvp);
1385 vnode_t *vp = ap->a_vp;
1386 struct componentname *cnp = ap->a_cnp;
1387 kauth_cred_t cred = cnp->cn_cred;
1388 struct lwp *l = curlwp;
1389/* true args */
1390 int error;
1391 const char *nm = cnp->cn_nameptr;
1392 int len = cnp->cn_namelen;
1393 struct cnode *cp;
1394
1395 MARK_ENTRY(CODA_RMDIR_STATS);
1396
1397 /* Check for rmdir of control object. */
1398 if (IS_CTL_NAME(dvp, nm, len)) {
1399 MARK_INT_FAIL(CODA_RMDIR_STATS);
1400 return(ENOENT);
1401 }
1402
1403 /* Can't remove . in self. */
1404 if (dvp == vp) {
1405#ifdef CODA_VERBOSE
1406 printf("%s: dvp == vp\n", __func__);
1407#endif
1408 error = EINVAL;
1409 goto exit;
1410 }
1411
1412 /*
1413 * The caller may not have adequate permissions, and the venus
1414 * operation may fail, but it doesn't hurt from a correctness
1415 * viewpoint to invalidate cache entries.
1416 * XXX Why isn't this done after the venus_rmdir call?
1417 */
1418 /* Look up child in name cache (by name, from parent). */
1419 cp = coda_nc_lookup(dcp, nm, len, cred);
1420 /* If found, remove all children of the child (., ..). */
1421 if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1422
1423 /* Remove child's own entry. */
1424 coda_nc_zapfile(dcp, nm, len);
1425
1426 /* Invalidate parent's attr cache (the modification time has changed). */
1427 dcp->c_flags &= ~C_VATTR;
1428
1429 error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1430
1431 CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1432
1433exit:
1434 /* vput both vnodes */
1435 vput(dvp);
1436 if (dvp == vp) {
1437 vrele(vp);
1438 } else {
1439 vput(vp);
1440 }
1441
1442 return(error);
1443}
1444
1445int
1446coda_symlink(void *v)
1447{
1448/* true args */
1449 struct vop_symlink_v3_args *ap = v;
1450 vnode_t *dvp = ap->a_dvp;
1451 struct cnode *dcp = VTOC(dvp);
1452 /* a_vpp is used in place below */
1453 struct componentname *cnp = ap->a_cnp;
1454 struct vattr *tva = ap->a_vap;
1455 char *path = ap->a_target;
1456 kauth_cred_t cred = cnp->cn_cred;
1457 struct lwp *l = curlwp;
1458/* locals */
1459 int error;
1460 u_long saved_cn_flags;
1461 const char *nm = cnp->cn_nameptr;
1462 int len = cnp->cn_namelen;
1463 int plen = strlen(path);
1464
1465 /*
1466 * Here's the strategy for the moment: perform the symlink, then
1467 * do a lookup to grab the resulting vnode. I know this requires
1468 * two communications with Venus for a new symbolic link, but
1469 * that's the way the ball bounces. I don't yet want to change
1470 * the way the Mach symlink works. When Mach support is
1471 * deprecated, we should change symlink so that the common case
1472 * returns the resultant vnode in a vpp argument.
1473 */
1474
1475 MARK_ENTRY(CODA_SYMLINK_STATS);
1476
1477 /* Check for symlink of control object. */
1478 if (IS_CTL_NAME(dvp, nm, len)) {
1479 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1480 error = EACCES;
1481 goto exit;
1482 }
1483
1484 if (plen+1 > CODA_MAXPATHLEN) {
1485 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1486 error = EINVAL;
1487 goto exit;
1488 }
1489
1490 if (len+1 > CODA_MAXNAMLEN) {
1491 MARK_INT_FAIL(CODA_SYMLINK_STATS);
1492 error = EINVAL;
1493 goto exit;
1494 }
1495
1496 error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1497
1498 /* Invalidate the parent's attr cache (modification time has changed). */
1499 dcp->c_flags &= ~C_VATTR;
1500
1501 if (!error) {
1502 /*
1503 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1504 * these are defined only for VOP_LOOKUP. We desire to reuse
1505 * cnp for a VOP_LOOKUP operation, and must be careful not to pass
1506 * along stray flags handed to us. Such stray flags can occur because
1507 * sys_symlink makes a namei call and then reuses the
1508 * componentname structure.
1509 */
1510 /*
1511 * XXX Arguably we should create our own componentname structure
1512 * and not reuse the one that was passed in.
1513 */
1514 saved_cn_flags = cnp->cn_flags;
1515 cnp->cn_flags &= ~(MODMASK | OPMASK);
1516 cnp->cn_flags |= LOOKUP;
1517 error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1518 cnp->cn_flags = saved_cn_flags;
1519 }
1520
1521 exit:
1522 CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1523 return(error);
1524}
1525
1526/*
1527 * Read directory entries.
1528 */
1529int
1530coda_readdir(void *v)
1531{
1532/* true args */
1533 struct vop_readdir_args *ap = v;
1534 vnode_t *vp = ap->a_vp;
1535 struct cnode *cp = VTOC(vp);
1536 struct uio *uiop = ap->a_uio;
1537 kauth_cred_t cred = ap->a_cred;
1538 int *eofflag = ap->a_eofflag;
1539 off_t **cookies = ap->a_cookies;
1540 int *ncookies = ap->a_ncookies;
1541/* upcall decl */
1542/* locals */
1543 int error = 0;
1544
1545 MARK_ENTRY(CODA_READDIR_STATS);
1546
1547 CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1548 uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1549 (long long) uiop->uio_offset)); )
1550
1551 /* Check for readdir of control object. */
1552 if (IS_CTL_VP(vp)) {
1553 MARK_INT_FAIL(CODA_READDIR_STATS);
1554 return(ENOENT);
1555 }
1556
1557 {
1558 /* Redirect the request to UFS. */
1559
1560 /* If directory is not already open do an "internal open" on it. */
1561 int opened_internally = 0;
1562 if (cp->c_ovp == NULL) {
1563 opened_internally = 1;
1564 MARK_INT_GEN(CODA_OPEN_STATS);
1565 error = VOP_OPEN(vp, FREAD, cred);
1566#ifdef CODA_VERBOSE
1567 printf("%s: Internally Opening %p\n", __func__, vp);
1568#endif
1569 if (error) return(error);
1570 } else
1571 vp = cp->c_ovp;
1572
1573 /* Have UFS handle the call. */
1574 CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1575 __func__, coda_f2s(&cp->c_fid), vp->v_usecount)); )
1576 error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
1577 if (error)
1578 MARK_INT_FAIL(CODA_READDIR_STATS);
1579 else
1580 MARK_INT_SAT(CODA_READDIR_STATS);
1581
1582 /* Do an "internal close" if necessary. */
1583 if (opened_internally) {
1584 MARK_INT_GEN(CODA_CLOSE_STATS);
1585 (void)VOP_CLOSE(vp, FREAD, cred);
1586 }
1587 }
1588
1589 return(error);
1590}
1591
1592/*
1593 * Convert from file system blocks to device blocks
1594 */
1595int
1596coda_bmap(void *v)
1597{
1598 /* XXX on the global proc */
1599/* true args */
1600 struct vop_bmap_args *ap = v;
1601 vnode_t *vp __unused = ap->a_vp; /* file's vnode */
1602 daddr_t bn __unused = ap->a_bn; /* fs block number */
1603 vnode_t **vpp = ap->a_vpp; /* RETURN vp of device */
1604 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1605 struct lwp *l __unused = curlwp;
1606/* upcall decl */
1607/* locals */
1608
1609 *vpp = (vnode_t *)0;
1610 myprintf(("coda_bmap called!\n"));
1611 return(EINVAL);
1612}
1613
1614/*
1615 * I don't think the following two things are used anywhere, so I've
1616 * commented them out
1617 *
1618 * struct buf *async_bufhead;
1619 * int async_daemon_count;
1620 */
1621int
1622coda_strategy(void *v)
1623{
1624/* true args */
1625 struct vop_strategy_args *ap = v;
1626 struct buf *bp __unused = ap->a_bp;
1627 struct lwp *l __unused = curlwp;
1628/* upcall decl */
1629/* locals */
1630
1631 myprintf(("coda_strategy called! "));
1632 return(EINVAL);
1633}
1634
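/*
 * Reclaim a vnode: free any cached symlink target and tear down the
 * cnode.
 */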
1635int
1636coda_reclaim(void *v)
1637{
1638/* true args */
1639 struct vop_reclaim_args *ap = v;
1640 vnode_t *vp = ap->a_vp;
1641 struct cnode *cp = VTOC(vp);
1642/* upcall decl */
1643/* locals */
1644
1645/*
1646 * Forced unmount/flush will let vnodes with a non-zero use count be destroyed!
1647 */
1648 ENTRY;
1649
1650 if (IS_UNMOUNTING(cp)) {
1651#ifdef DEBUG
1652 if (VTOC(vp)->c_ovp)
1653 printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1656#endif
1657 } else {
1658#ifdef OLD_DIAGNOSTIC
1659 if (vp->v_usecount != 0)
1660 printf("%s: pushing active %p\n", __func__, vp);
1661 if (VTOC(vp)->c_ovp) {
1662 panic("%s: c_ovp not void", __func__);
1663 }
1664#endif
1665 }
1666 /* If an array has been allocated to hold the symlink, deallocate it */
1667 if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
1668 if (cp->c_symlink == NULL)
1669 panic("%s: null symlink pointer in cnode", __func__);
1670
1671 CODA_FREE(cp->c_symlink, cp->c_symlen);
1672 cp->c_flags &= ~C_SYMLINK;
1673 cp->c_symlen = 0;
1674 }
1675
1676 mutex_enter(vp->v_interlock);
1677 mutex_enter(&cp->c_lock);
1678 SET_VTOC(vp) = NULL;
1679 mutex_exit(&cp->c_lock);
1680 mutex_exit(vp->v_interlock);
1681 mutex_destroy(&cp->c_lock);
1682 kmem_free(cp, sizeof(*cp));
1683
1684 return (0);
1685}
1686
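/*
 * Lock, unlock and islocked delegate to the genfs routines, after
 * optional debugging output.
 */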
1687int
1688coda_lock(void *v)
1689{
1690/* true args */
1691 struct vop_lock_args *ap = v;
1692 vnode_t *vp = ap->a_vp;
1693 struct cnode *cp = VTOC(vp);
1694/* upcall decl */
1695/* locals */
1696
1697 ENTRY;
1698
1699 if (coda_lockdebug) {
1700 myprintf(("Attempting lock on %s\n",
1701 coda_f2s(&cp->c_fid)));
1702 }
1703
1704 return genfs_lock(v);
1705}
1706
1707int
1708coda_unlock(void *v)
1709{
1710/* true args */
1711 struct vop_unlock_args *ap = v;
1712 vnode_t *vp = ap->a_vp;
1713 struct cnode *cp = VTOC(vp);
1714/* upcall decl */
1715/* locals */
1716
1717 ENTRY;
1718 if (coda_lockdebug) {
1719 myprintf(("Attempting unlock on %s\n",
1720 coda_f2s(&cp->c_fid)));
1721 }
1722
1723 return genfs_unlock(v);
1724}
1725
1726int
1727coda_islocked(void *v)
1728{
1729/* true args */
1730 ENTRY;
1731
1732 return genfs_islocked(v);
1733}
1734
1735/*
1736 * Given a device and inode, obtain a locked vnode. One reference is
1737 * obtained and passed back to the caller.
1738 */
1739int
1740coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1741{
1742 int error;
1743 struct mount *mp;
1744
1745 /* Obtain mount point structure from device. */
1746 if (!(mp = devtomp(dev))) {
1747 myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1748 (unsigned long long)dev));
1749 return(ENXIO);
1750 }
1751
1752 /*
1753 * Obtain vnode from mount point and inode.
1754 */
1755 error = VFS_VGET(mp, ino, vpp);
1756 if (error) {
1757 myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1758 (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1759 return(ENOENT);
1760 }
1761 /* share the underlying vnode lock with the coda vnode */
1762 mutex_obj_hold((*vpp)->v_interlock);
1763 uvm_obj_setlock(&uvp->v_uobj, (*vpp)->v_interlock);
1764 KASSERT(VOP_ISLOCKED(*vpp));
1765 return(0);
1766}
1767
1768static void
1769coda_print_vattr(struct vattr *attr)
1770{
1771 const char *typestr;
1772
1773 switch (attr->va_type) {
1774 case VNON:
1775 typestr = "VNON";
1776 break;
1777 case VREG:
1778 typestr = "VREG";
1779 break;
1780 case VDIR:
1781 typestr = "VDIR";
1782 break;
1783 case VBLK:
1784 typestr = "VBLK";
1785 break;
1786 case VCHR:
1787 typestr = "VCHR";
1788 break;
1789 case VLNK:
1790 typestr = "VLNK";
1791 break;
1792 case VSOCK:
1793 typestr = "VSCK";
1794 break;
1795 case VFIFO:
1796 typestr = "VFFO";
1797 break;
1798 case VBAD:
1799 typestr = "VBAD";
1800 break;
1801 default:
1802 typestr = "????";
1803 break;
1804 }
1805
1806
1807 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1808 typestr, (int)attr->va_mode, (int)attr->va_uid,
1809 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1810
1811 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1812 (int)attr->va_fileid, (int)attr->va_nlink,
1813 (int)attr->va_size,
1814 (int)attr->va_blocksize,(int)attr->va_bytes));
1815 myprintf((" gen %ld flags %ld vaflags %d\n",
1816 attr->va_gen, attr->va_flags, attr->va_vaflags));
1817 myprintf((" atime sec %d nsec %d\n",
1818 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1819 myprintf((" mtime sec %d nsec %d\n",
1820 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1821 myprintf((" ctime sec %d nsec %d\n",
1822 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1823}
1824
1825/*
1826 * Return a vnode for the given fid.
1827 * If no cnode exists for this fid create one and put it
1828 * in a table hashed by coda_f2i(). If the cnode for
1829 * this fid is already in the table return it (ref count is
1830 * incremented by coda_find. The cnode will be flushed from the
1831 * table when coda_inactive calls coda_unsave.
1832 */
1833struct cnode *
1834make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1835{
1836 int error __diagused;
1837 struct vnode *vp;
1838 struct cnode *cp;
1839
1840 error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp);
1841 KASSERT(error == 0);
1842
1843 mutex_enter(vp->v_interlock);
1844 cp = VTOC(vp);
1845 KASSERT(cp != NULL);
1846 mutex_enter(&cp->c_lock);
1847 mutex_exit(vp->v_interlock);
1848
1849 if (vp->v_type != type) {
1850 if (vp->v_type == VCHR || vp->v_type == VBLK)
1851 spec_node_destroy(vp);
1852 vp->v_type = type;
1853 if (type == VCHR || type == VBLK)
1854 spec_node_init(vp, NODEV);
1855 uvm_vnp_setsize(vp, 0);
1856 }
1857 mutex_exit(&cp->c_lock);
1858
1859 return cp;
1860}
1861
1862/*
1863 * coda_getpages may be called on a vnode which has not been opened,
1864 * e.g. to fault in pages to execute a program. In that case, we must
1865 * open the file to get the container. The vnode may or may not be
1866 * locked, and we must leave it in the same state.
1867 */
1868int
1869coda_getpages(void *v)
1870{
1871 struct vop_getpages_args /* {
1872 vnode_t *a_vp;
1873 voff_t a_offset;
1874 struct vm_page **a_m;
1875 int *a_count;
1876 int a_centeridx;
1877 vm_prot_t a_access_type;
1878 int a_advice;
1879 int a_flags;
1880 } */ *ap = v;
1881 vnode_t *vp = ap->a_vp, *cvp;
1882 struct cnode *cp = VTOC(vp);
1883 struct lwp *l = curlwp;
1884 kauth_cred_t cred = l->l_cred;
1885 int error, cerror;
1886 int waslocked; /* 1 if vnode lock was held on entry */
1887 int didopen = 0; /* 1 if we opened container file */
1888
1889 /*
1890 * Handle a case that uvm_fault doesn't quite use yet.
1891 * See layer_vnops.c for inspiration.
1892 */
1893 if (ap->a_flags & PGO_LOCKED) {
1894 return EBUSY;
1895 }
1896
1897 KASSERT(mutex_owned(vp->v_interlock));
1898
1899 /* Check for control object. */
1900 if (IS_CTL_VP(vp)) {
1901#ifdef CODA_VERBOSE
1902 printf("%s: control object %p\n", __func__, vp);
1903#endif
1904 return(EINVAL);
1905 }
1906
1907 /*
1908 * XXX It's really not ok to be releasing the lock we get,
1909 * because we could be overlapping with another call to
1910 * getpages and drop a lock they are relying on. We need to
1911 * figure out whether getpages ever is called holding the
1912 * lock, and if we should serialize getpages calls by some
1913 * mechanism.
1914 */
1915 /* XXX VOP_ISLOCKED() may not be used for lock decisions. */
1916 waslocked = VOP_ISLOCKED(vp);
1917
1918 /* Get container file if not already present. */
1919 cvp = cp->c_ovp;
1920 if (cvp == NULL) {
1921 /*
1922 * VOP_OPEN requires a locked vnode. We must avoid
1923 * locking the vnode if it is already locked, and
1924 * leave it in the same state on exit.
1925 */
1926 if (waslocked == 0) {
1927 mutex_exit(vp->v_interlock);
1928 cerror = vn_lock(vp, LK_EXCLUSIVE);
1929 if (cerror) {
1930#ifdef CODA_VERBOSE
1931 printf("%s: can't lock vnode %p\n",
1932 __func__, vp);
1933#endif
1934 return cerror;
1935 }
1936#ifdef CODA_VERBOSE
1937 printf("%s: locked vnode %p\n", __func__, vp);
1938#endif
1939 }
1940
1941 /*
1942 * Open file (causes upcall to venus).
1943 * XXX Perhaps we should not fully open the file, but
1944 * simply obtain a container file.
1945 */
1946 /* XXX Is it ok to do this while holding the mutex? */
1947 cerror = VOP_OPEN(vp, FREAD, cred);
1948
1949 if (cerror) {
1950#ifdef CODA_VERBOSE
1951 printf("%s: cannot open vnode %p => %d\n", __func__,
1952 vp, cerror);
1953#endif
1954 if (waslocked == 0)
1955 VOP_UNLOCK(vp);
1956 return cerror;
1957 }
1958
1959#ifdef CODA_VERBOSE
1960 printf("%s: opened vnode %p\n", __func__, vp);
1961#endif
1962 cvp = cp->c_ovp;
1963 didopen = 1;
1964 if (waslocked == 0)
1965 mutex_enter(vp->v_interlock);
1966 }
1967 KASSERT(cvp != NULL);
1968
1969 /* Munge the arg structure to refer to the container vnode. */
1970 KASSERT(cvp->v_interlock == vp->v_interlock);
1971 ap->a_vp = cp->c_ovp;
1972
1973 /* Finally, call getpages on it. */
1974 error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
1975
1976 /* If we opened the vnode, we must close it. */
1977 if (didopen) {
1978 /*
1979 * VOP_CLOSE requires a locked vnode, but we are still
1980 * holding the lock (or riding a caller's lock).
1981 */
1982 cerror = VOP_CLOSE(vp, FREAD, cred);
1983#ifdef CODA_VERBOSE
1984 if (cerror != 0)
1985 /* XXX How should we handle this? */
1986 printf("%s: closed vnode %p -> %d\n", __func__,
1987 vp, cerror);
1988#endif
1989
1990 /* If we obtained a lock, drop it. */
1991 if (waslocked == 0)
1992 VOP_UNLOCK(vp);
1993 }
1994
1995 return error;
1996}
1997
1998/*
1999 * The protocol requires v_interlock to be held by the caller.
2000 */
2001int
2002coda_putpages(void *v)
2003{
2004 struct vop_putpages_args /* {
2005 vnode_t *a_vp;
2006 voff_t a_offlo;
2007 voff_t a_offhi;
2008 int a_flags;
2009 } */ *ap = v;
2010 vnode_t *vp = ap->a_vp, *cvp;
2011 struct cnode *cp = VTOC(vp);
2012 int error;
2013
2014 KASSERT(mutex_owned(vp->v_interlock));
2015
2016 /* Check for control object. */
2017 if (IS_CTL_VP(vp)) {
2018 mutex_exit(vp->v_interlock);
2019#ifdef CODA_VERBOSE
2020 printf("%s: control object %p\n", __func__, vp);
2021#endif
2022 return 0;
2023 }
2024
2025 /*
2026 * If container object is not present, then there are no pages
2027 * to put; just return without error. This happens all the
2028 * time, apparently during discard of a closed vnode (which
2029 * trivially can't have dirty pages).
2030 */
2031 cvp = cp->c_ovp;
2032 if (cvp == NULL) {
2033 mutex_exit(vp->v_interlock);
2034 return 0;
2035 }
2036
2037 /* Munge the arg structure to refer to the container vnode. */
2038 KASSERT(cvp->v_interlock == vp->v_interlock);
2039 ap->a_vp = cvp;
2040
2041 /* Finally, call putpages on it. */
2042 error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2043
2044 return error;
2045}
2046