/*	$NetBSD: umap_vnops.c,v 1.57 2014/11/09 18:08:07 maxv Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 5/22/95
 */

/*
 * Umap Layer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: umap_vnops.c,v 1.57 2014/11/09 18:08:07 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/buf.h>
#include <sys/kauth.h>

#include <miscfs/umapfs/umap.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/layer_extern.h>

/*
 * Note: If the LAYERFS_MBYPASSDEBUG flag is set, it is possible
 * that the debug printing will bomb out, because kauth routines
 * do not handle NOCRED or FSCRED like other credentials and end
 * up dereferencing an inappropriate pointer.
 *
 * That should be fixed in kauth rather than here.
 */

int	umap_lookup(void *);
int	umap_getattr(void *);
int	umap_print(void *);
int	umap_rename(void *);

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 *
 */
int (**umap_vnodeop_p)(void *);
const struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc,	umap_bypass },

	{ &vop_lookup_desc,	umap_lookup },
	{ &vop_getattr_desc,	umap_getattr },
	{ &vop_print_desc,	umap_print },
	{ &vop_rename_desc,	umap_rename },

	{ &vop_fsync_desc,	layer_fsync },
	{ &vop_inactive_desc,	layer_inactive },
	{ &vop_reclaim_desc,	layer_reclaim },
	{ &vop_lock_desc,	layer_lock },
	{ &vop_open_desc,	layer_open },
	{ &vop_setattr_desc,	layer_setattr },
	{ &vop_access_desc,	layer_access },
	{ &vop_remove_desc,	layer_remove },
	{ &vop_revoke_desc,	layer_revoke },
	{ &vop_rmdir_desc,	layer_rmdir },

	{ &vop_bmap_desc,	layer_bmap },
	{ &vop_getpages_desc,	layer_getpages },
	{ &vop_putpages_desc,	layer_putpages },

	{ NULL, NULL }
};
const struct vnodeopv_desc umapfs_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };

/*
 * This is the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_bypass(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap = v;
	int (**our_vnodeop_p)(void *);
	kauth_cred_t *credpp = NULL, credp = 0;
	kauth_cred_t savecredp = 0, savecompcredp = 0;
	kauth_cred_t compcredp = 0;
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS], *vp0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i, flags;
	struct componentname **compnamepp = 0;

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("%s: no vp's in map.\n", __func__);
#endif

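	/*
	 * The first vnode argument determines the umap mount (and hence
	 * the mapping flags) and the op vector we expect on our own vnodes.
	 */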
	vps_p[0] =
	    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	vp0 = *vps_p[0];
	flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags;
	our_vnodeop_p = vp0->v_op;

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("%s: %s\n", __func__, descp->vdesc_name);

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i],
		    ap);
		/*
		 * We're not guaranteed that any vnode but the first
		 * is of our type.  Check for and don't map any
		 * that aren't.  (We must always map the first vp
		 * or vclean fails.)
		 */
		if (i && (*this_vp_p == NULL ||
		    (*this_vp_p)->v_op != our_vnodeop_p)) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				vref(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials. (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(kauth_cred_t*,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED && savecredp != FSCRED)
			*credpp = kauth_cred_dup(savecredp);
		credp = *credpp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, credp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    kauth_cred_geteuid(credp), kauth_cred_getegid(credp));
	}

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED && savecompcredp != FSCRED)
			(*compnamepp)->cn_cred = kauth_cred_dup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */

		umap_mapids(vp0->v_mount, compcredp);

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*vps_p[0], descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;	/* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && !error) {
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		/*
		 * Only vop_lookup, vop_create, vop_mkdir, vop_mknod
		 * and vop_symlink return vpp's.  vop_lookup doesn't call
		 * bypass, as a lookup on "." would generate a locking error.
		 * So all the calls which get us here have an unlocked vpp. :-)
		 */
		error = layer_node_create(old_vps[0]->v_mount, **vppp, *vppp);
		if (error) {
			vrele(**vppp);
			**vppp = NULL;
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && credp &&
		    kauth_cred_geteuid(credp) != 0)
			printf("umap_bypass: returning-user was %d\n",
			    kauth_cred_geteuid(credp));

		if (savecredp != NOCRED && savecredp != FSCRED && credpp) {
			kauth_cred_free(credp);
			*credpp = savecredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && credpp &&
			    kauth_cred_geteuid(*credpp) != 0)
				printf("umap_bypass: returning-user now %d\n\n",
				    kauth_cred_geteuid(savecredp));
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    kauth_cred_geteuid(compcredp));

		if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
			kauth_cred_free(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
			    kauth_cred_geteuid(savecompcredp) != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    kauth_cred_geteuid(savecompcredp));
		}
	}

	return (error);
}

/*
 * This is based on the 08-June-1999 bypass routine.
 * See layer_vnops.c:layer_bypass for more details.
 */
int
umap_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t savecompcredp = NULL;
	kauth_cred_t compcredp = NULL;
	struct vnode *dvp, *vp, *ldvp;
	struct mount *mp;
	int error;
	int flags, cnf = cnp->cn_flags;

	dvp = ap->a_dvp;
	mp = dvp->v_mount;

	if ((cnf & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	flags = MOUNTTOUMAPMOUNT(mp)->umapm_flags;
	ldvp = UMAPVPTOLOWERVP(dvp);

	if (flags & LAYERFS_MBYPASSDEBUG)
		printf("umap_lookup\n");

	/*
	 * Fix the credentials. (That's the purpose of this layer.)
	 *
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if ((savecompcredp = cnp->cn_cred)) {
		compcredp = kauth_cred_dup(savecompcredp);
		cnp->cn_cred = compcredp;

		if ((flags & LAYERFS_MBYPASSDEBUG) &&
		    kauth_cred_geteuid(compcredp) != 0)
			printf("umap_lookup: component credit user was %d, group %d\n",
			    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

		/* Map all ids in the credential structure. */
		umap_mapids(mp, compcredp);
	}

	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: component credit user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

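	/*
	 * Call the lookup on the lower layer with the lower directory
	 * vnode and the mapped credentials.
	 */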
	ap->a_dvp = ldvp;
	error = VCALL(ldvp, ap->a_desc->vdesc_offset, ap);
	vp = *ap->a_vpp;
	*ap->a_vpp = NULL;

	if (error == EJUSTRETURN && (cnf & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/* Do locking fixup as appropriate. See layer_lookup() for info */
	if (ldvp == vp) {
		*ap->a_vpp = dvp;
		vref(dvp);
		vrele(vp);
	} else if (vp != NULL) {
		error = layer_node_create(mp, vp, ap->a_vpp);
		if (error) {
			vrele(vp);
		}
	}

	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if ((flags & LAYERFS_MBYPASSDEBUG) && compcredp &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_lookup: returning-component-user was %d\n",
		    kauth_cred_geteuid(compcredp));

	if (savecompcredp != NOCRED && savecompcredp != FSCRED) {
		if (compcredp)
			kauth_cred_free(compcredp);
		cnp->cn_cred = savecompcredp;
		if ((flags & LAYERFS_MBYPASSDEBUG) && savecompcredp &&
		    kauth_cred_geteuid(savecompcredp) != 0)
			printf("umap_lookup: returning-component-user now %d\n",
			    kauth_cred_geteuid(savecompcredp));
	}

	return (error);
}

/*
 * We handle getattr to change the fsid.
 */
int
umap_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries, flags;
	u_long (*mapdata)[2];
	u_long (*gmapdata)[2];
	struct vnode **vp1p;
	const struct vnodeop_desc *descp = ap->a_desc;

	if ((error = umap_bypass(ap)) != 0)
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];

	flags = MOUNTTOUMAPMOUNT(ap->a_vp->v_mount)->umapm_flags;
	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to the NULLGROUP.
	 */

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if ((flags & LAYERFS_MBYPASSDEBUG))
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

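	/* Fetch the uid and gid mapping tables from the umap mount. */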
	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if ((flags & LAYERFS_MBYPASSDEBUG))
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

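/*
 * We handle print to show the underlying lower vnode along with the
 * umap vnode itself.
 */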
int
umap_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	printf("\ttag VT_UMAPFS, vp=%p, lowervp=%p\n", vp,
	    UMAPVPTOLOWERVP(vp));
	return (0);
}

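/*
 * We handle rename because it carries two componentname structures;
 * the credential in the target componentname must be mapped here before
 * the rest of the work is passed to umap_bypass().
 */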
int
umap_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	int error, flags;
	struct componentname *compnamep;
	kauth_cred_t compcredp, savecompcredp;
	struct vnode *vp;
	struct vnode *tvp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */

	vp = ap->a_fdvp;
	flags = MOUNTTOUMAPMOUNT(vp->v_mount)->umapm_flags;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = kauth_cred_dup(savecompcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component credit user was %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if ((flags & LAYERFS_MBYPASSDEBUG) &&
	    kauth_cred_geteuid(compcredp) != 0)
		printf("umap_rename: rename component credit user now %d, group %d\n",
		    kauth_cred_geteuid(compcredp), kauth_cred_getegid(compcredp));

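	/*
	 * If the target vnode exists on the same mount, keep a reference
	 * across the bypass so that its layer node can be flagged as
	 * removed once the rename has succeeded.
	 */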
	tvp = ap->a_tvp;
	if (tvp) {
		if (tvp->v_mount != vp->v_mount)
			tvp = NULL;
		else
			vref(tvp);
	}
	error = umap_bypass(ap);
	if (tvp) {
		if (error == 0)
			VTOLAYER(tvp)->layer_flags |= LAYERFS_REMOVED;
		vrele(tvp);
	}

	/* Restore the additional mapped componentname cred structure. */

	kauth_cred_free(compcredp);
	compnamep->cn_cred = savecompcredp;

	return error;
}