/*	$NetBSD: tmpfs_mem.c,v 1.9 2016/08/22 23:07:36 skrll Exp $	*/

/*
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tmpfs memory allocation routines.
 * Implements memory usage accounting and limiting.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tmpfs_mem.c,v 1.9 2016/08/22 23:07:36 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>

#include <fs/tmpfs/tmpfs.h>

extern struct pool	tmpfs_dirent_pool;
extern struct pool	tmpfs_node_pool;

/*
 * tmpfs_mntmem_init: initialise the per-mount memory accounting state
 * and set the memory usage limit.
 */
void
tmpfs_mntmem_init(struct tmpfs_mount *mp, uint64_t memlimit)
{

	mutex_init(&mp->tm_acc_lock, MUTEX_DEFAULT, IPL_NONE);
	mp->tm_mem_limit = memlimit;
	mp->tm_bytes_used = 0;
}

/*
 * tmpfs_mntmem_destroy: tear down the accounting state.  All memory
 * charged against the mount must have been released by now.
 */
void
tmpfs_mntmem_destroy(struct tmpfs_mount *mp)
{

	KASSERT(mp->tm_bytes_used == 0);
	mutex_destroy(&mp->tm_acc_lock);
}

/*
 * tmpfs_mntmem_set: change the memory usage limit.  Fails with EBUSY if
 * the current usage (rounded up to a page boundary) already exceeds the
 * requested limit.
 */
int
tmpfs_mntmem_set(struct tmpfs_mount *mp, uint64_t memlimit)
{
	int error;

	mutex_enter(&mp->tm_acc_lock);
	if (round_page(mp->tm_bytes_used) >= memlimit)
		error = EBUSY;
	else {
		error = 0;
		mp->tm_mem_limit = memlimit;
	}
	mutex_exit(&mp->tm_acc_lock);
	return error;
}
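
/*
 * Illustrative sketch (not part of the original source): a caller that
 * shrinks the limit, e.g. an update-mount path handling a new "size="
 * option, is expected to handle EBUSY when the filesystem already uses
 * more than the requested limit:
 *
 *	error = tmpfs_mntmem_set(tmp, newlimit);
 *	if (error == EBUSY) {
 *		// refuse the update: existing data would not fit
 *	}
 */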
/*
 * tmpfs_mem_info: return the number of available memory pages.
 *
 * => If 'total' is true, then return the _total_ amount of pages.
 * => If false, then return the amount of _free_ memory pages.
 *
 * Remember to remove uvmexp.freetarg from the returned value to avoid
 * excessive memory usage.
 */
size_t
tmpfs_mem_info(bool total)
{
	size_t size = 0;

	/* XXX: unlocked */
	size += uvmexp.swpgavail;
	if (!total) {
		size -= uvmexp.swpgonly;
	}
	size += uvmexp.free;
	size += uvmexp.filepages;
	if (size > uvmexp.wired) {
		size -= uvmexp.wired;
	} else {
		size = 0;
	}
	return size;
}
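
/*
 * Added note: tmpfs_bytes_max() below is the caller that applies the
 * uvmexp.freetarg adjustment mentioned in the comment above, keeping
 * the page daemon's free target in reserve.
 */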

/*
 * tmpfs_bytes_max: return the maximum number of bytes this mount may
 * use: the smaller of the configured limit and what the system can
 * currently provide (current usage plus available pages).
 */
uint64_t
tmpfs_bytes_max(struct tmpfs_mount *mp)
{
	psize_t freepages = tmpfs_mem_info(false);
	int freetarg = uvmexp.freetarg;	/* XXX unlocked */
	uint64_t avail_mem;

	if (freepages < freetarg) {
		freepages = 0;
	} else {
		freepages -= freetarg;
	}
	avail_mem = round_page(mp->tm_bytes_used) + (freepages << PAGE_SHIFT);
	return MIN(mp->tm_mem_limit, avail_mem);
}

/*
 * tmpfs_pages_avail: return the number of pages still available to this
 * mount before the limit is reached.
 */
size_t
tmpfs_pages_avail(struct tmpfs_mount *mp)
{

	return (tmpfs_bytes_max(mp) - mp->tm_bytes_used) >> PAGE_SHIFT;
}

/*
 * tmpfs_mem_incr: charge 'sz' bytes of memory against the mount.
 * Returns true on success, or false if the limit would be exceeded.
 */
bool
tmpfs_mem_incr(struct tmpfs_mount *mp, size_t sz)
{
	uint64_t lim;

	mutex_enter(&mp->tm_acc_lock);
	lim = tmpfs_bytes_max(mp);
	if (mp->tm_bytes_used + sz >= lim) {
		mutex_exit(&mp->tm_acc_lock);
		return false;
	}
	mp->tm_bytes_used += sz;
	mutex_exit(&mp->tm_acc_lock);
	return true;
}

/*
 * tmpfs_mem_decr: release 'sz' bytes previously charged with
 * tmpfs_mem_incr().
 */
void
tmpfs_mem_decr(struct tmpfs_mount *mp, size_t sz)
{

	mutex_enter(&mp->tm_acc_lock);
	KASSERT(mp->tm_bytes_used >= sz);
	mp->tm_bytes_used -= sz;
	mutex_exit(&mp->tm_acc_lock);
}
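
/*
 * Illustrative sketch (not part of the original source): the allocation
 * helpers below pair the two routines above around the real allocation,
 * charging first and releasing the charge when freeing.  A hypothetical
 * helper following the same pattern would look like:
 *
 *	void *
 *	example_alloc(struct tmpfs_mount *mp, size_t sz)
 *	{
 *		if (!tmpfs_mem_incr(mp, sz))
 *			return NULL;		// over the limit
 *		return kmem_alloc(sz, KM_SLEEP);
 *	}
 *
 *	void
 *	example_free(struct tmpfs_mount *mp, void *p, size_t sz)
 *	{
 *		tmpfs_mem_decr(mp, sz);
 *		kmem_free(p, sz);
 *	}
 */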

/*
 * tmpfs_dirent_get: allocate a directory entry, charging its size
 * against the mount.  Returns NULL if the memory limit is reached.
 */
struct tmpfs_dirent *
tmpfs_dirent_get(struct tmpfs_mount *mp)
{

	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_dirent))) {
		return NULL;
	}
	return pool_get(&tmpfs_dirent_pool, PR_WAITOK);
}

/*
 * tmpfs_dirent_put: free a directory entry and release its charge.
 */
void
tmpfs_dirent_put(struct tmpfs_mount *mp, struct tmpfs_dirent *de)
{

	tmpfs_mem_decr(mp, sizeof(struct tmpfs_dirent));
	pool_put(&tmpfs_dirent_pool, de);
}

/*
 * tmpfs_node_get: allocate an inode (tmpfs node), enforcing both the
 * node count limit and the memory limit.  Returns NULL on failure.
 */
struct tmpfs_node *
tmpfs_node_get(struct tmpfs_mount *mp)
{

	if (atomic_inc_uint_nv(&mp->tm_nodes_cnt) >= mp->tm_nodes_max) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	if (!tmpfs_mem_incr(mp, sizeof(struct tmpfs_node))) {
		atomic_dec_uint(&mp->tm_nodes_cnt);
		return NULL;
	}
	return pool_get(&tmpfs_node_pool, PR_WAITOK);
}
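
/*
 * Added commentary: the node counter is bumped up-front with a
 * lock-free atomic increment and rolled back if either the node-count
 * check or the memory charge fails, so no separate lock is needed for
 * the counter itself.
 */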

/*
 * tmpfs_node_put: free an inode and release its node-count and memory
 * charges.
 */
void
tmpfs_node_put(struct tmpfs_mount *mp, struct tmpfs_node *tn)
{

	atomic_dec_uint(&mp->tm_nodes_cnt);
	tmpfs_mem_decr(mp, sizeof(struct tmpfs_node));
	pool_put(&tmpfs_node_pool, tn);
}

/*
 * Quantum size to round up tmpfs name lengths, in order to reduce
 * re-allocations.
 */

#define	TMPFS_NAME_QUANTUM	(32)
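
/*
 * Illustrative example (added): roundup2() rounds the length up to the
 * next multiple of the quantum, e.g. a 5-byte name is charged 32 bytes
 * and a 33-byte name is charged 64 bytes, so small changes to a name's
 * length do not force a re-allocation.
 */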

/*
 * tmpfs_strname_alloc: allocate a string (name) buffer of the given
 * length, rounded up to TMPFS_NAME_QUANTUM and charged against the
 * mount.  Returns NULL if the memory limit is reached.
 */
char *
tmpfs_strname_alloc(struct tmpfs_mount *mp, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	if (!tmpfs_mem_incr(mp, sz)) {
		return NULL;
	}
	return kmem_alloc(sz, KM_SLEEP);
}

/*
 * tmpfs_strname_free: free a string buffer allocated with
 * tmpfs_strname_alloc() and release its charge.  The same 'len' must
 * be passed, since it determines the rounded allocation size.
 */
void
tmpfs_strname_free(struct tmpfs_mount *mp, char *str, size_t len)
{
	const size_t sz = roundup2(len, TMPFS_NAME_QUANTUM);

	KASSERT(sz > 0 && sz <= 1024);
	tmpfs_mem_decr(mp, sz);
	kmem_free(str, sz);
}

/*
 * tmpfs_strname_neqlen: return true if the two component names differ
 * in length or content.  Only the actual name bytes are compared, so
 * the quantum rounding used for allocation does not apply here.
 */
bool
tmpfs_strname_neqlen(struct componentname *fcnp, struct componentname *tcnp)
{
	const size_t fln = fcnp->cn_namelen;
	const size_t tln = tcnp->cn_namelen;

	return (fln != tln) || memcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fln);
}