// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blkdev.h>

#include "../dm-core.h"
#include "pcache_internal.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "cache.h"
#include "dm_pcache.h"

static struct kmem_cache *backing_req_cache;
static struct kmem_cache *backing_bvec_cache;

static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
{
	mempool_exit(&backing_dev->req_pool);
	mempool_exit(&backing_dev->bvec_pool);
}

static void req_submit_fn(struct work_struct *work);
static void req_complete_fn(struct work_struct *work);

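/*
 * Set up the per-device backing state: the request and bio_vec
 * mempools, the submit/complete lists with their locks and work items,
 * and the inflight-request counter used by backing_dev_stop() to drain
 * the device.
 */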
static int backing_dev_init(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
	if (ret)
		goto err;

	ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
	if (ret)
		goto req_pool_exit;

	INIT_LIST_HEAD(&backing_dev->submit_list);
	INIT_LIST_HEAD(&backing_dev->complete_list);
	spin_lock_init(&backing_dev->submit_lock);
	spin_lock_init(&backing_dev->complete_lock);
	INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
	INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
	atomic_set(&backing_dev->inflight_reqs, 0);
	init_waitqueue_head(&backing_dev->inflight_wq);

	return 0;

req_pool_exit:
	mempool_exit(&backing_dev->req_pool);
err:
	return ret;
}

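/*
 * Initialize the backing device and record its size, in sectors,
 * from the underlying block device.
 */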
int backing_dev_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = backing_dev_init(pcache);
	if (ret)
		return ret;

	backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);

	return 0;
}

void backing_dev_stop(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;

	/*
	 * No new requests should be coming in at this point; just wait
	 * for the inflight requests to complete.
	 */
	wait_event(backing_dev->inflight_wq,
		   atomic_read(&backing_dev->inflight_reqs) == 0);

	flush_work(&backing_dev->req_submit_work);
	flush_work(&backing_dev->req_complete_work);

	backing_dev_exit(backing_dev);
}

/* pcache_backing_dev_req functions */
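/*
 * Request lifecycle: a request is allocated (backing_dev_req_alloc()),
 * initialized (backing_dev_req_init()) and submitted, either directly
 * or via the submit worker.  When its bio completes,
 * backing_dev_bio_end() moves it to the complete list and the complete
 * worker finishes it through backing_dev_req_end() below.
 */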
void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (backing_req->end_req)
		backing_req->end_req(backing_req, backing_req->ret);

	switch (backing_req->type) {
	case BACKING_DEV_REQ_TYPE_REQ:
		if (backing_req->req.upper_req)
			pcache_req_put(backing_req->req.upper_req, backing_req->ret);
		break;
	case BACKING_DEV_REQ_TYPE_KMEM:
		if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
			mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
		break;
	default:
		BUG();
	}

	mempool_free(backing_req, &backing_dev->req_pool);

	if (atomic_dec_and_test(&backing_dev->inflight_reqs))
		wake_up(&backing_dev->inflight_wq);
}

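/*
 * Completion worker: splice the complete list into a local list under
 * the lock, then end each request with the lock dropped, so end_req
 * callbacks never run under complete_lock.
 */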
static void req_complete_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock_irq(&backing_dev->complete_lock);
	list_splice_init(&backing_dev->complete_list, &tmp_list);
	spin_unlock_irq(&backing_dev->complete_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		backing_dev_req_end(backing_req);
	}
}

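/*
 * bio endio callback.  This may run in interrupt context, hence the
 * irqsave locking; the actual completion work is deferred to the
 * pcache task workqueue.
 */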
static void backing_dev_bio_end(struct bio *bio)
{
	struct pcache_backing_dev_req *backing_req = bio->bi_private;
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	unsigned long flags;

	backing_req->ret = bio->bi_status;

	spin_lock_irqsave(&backing_dev->complete_lock, flags);
	list_move_tail(&backing_req->node, &backing_dev->complete_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
	spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
}

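/*
 * Submission worker: drain the submit list and issue each deferred
 * bio from process context via submit_bio_noacct().
 */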
static void req_submit_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock(&backing_dev->submit_lock);
	list_splice_init(&backing_dev->submit_list, &tmp_list);
	spin_unlock(&backing_dev->submit_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		submit_bio_noacct(&backing_req->bio);
	}
}

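/*
 * Submit a backing request.  With @direct set, the bio is issued
 * immediately from the caller's context; otherwise it is queued on the
 * submit list and issued later by the submit worker.
 */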
void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (direct) {
		submit_bio_noacct(&backing_req->bio);
		return;
	}

	spin_lock(&backing_dev->submit_lock);
	list_add_tail(&backing_req->node, &backing_dev->submit_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
	spin_unlock(&backing_dev->submit_lock);
}

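/*
 * Map a kernel buffer into @bio.  A kmalloc'ed (physically contiguous)
 * buffer is added as a single segment; a vmalloc'ed buffer has to be
 * flushed and added page by page.
 */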
static void bio_map(struct bio *bio, void *base, size_t size)
{
	struct page *page;
	unsigned int offset;
	unsigned int len;

	if (!is_vmalloc_addr(base)) {
		page = virt_to_page(base);
		offset = offset_in_page(base);

		BUG_ON(!bio_add_page(bio, page, size, offset));
		return;
	}

	flush_kernel_vmap_range(base, size);
	while (size) {
		page = vmalloc_to_page(base);
		offset = offset_in_page(base);
		len = min_t(size_t, PAGE_SIZE - offset, size);

		BUG_ON(!bio_add_page(bio, page, len, offset));
		size -= len;
		base += len;
	}
}

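/*
 * Allocate a REQ-type backing request: clone the upper request's bio
 * into the embedded bio, so the data pages are shared with the
 * original request.
 */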
static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
							 struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct pcache_backing_dev_req *backing_req;
	struct bio *orig = pcache_req->bio;

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);

	backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;
}

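/*
 * Allocate a KMEM-type backing request.  Small buffers use the bio_vec
 * array embedded in the request; anything needing more than
 * BACKING_DEV_REQ_INLINE_BVECS vectors gets its array from the bvec
 * mempool instead.
 */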
static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
							  struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;
	u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
		backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
		if (!backing_req->kmem.bvecs)
			goto free_backing_req;
	} else {
		backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
	}

	backing_req->kmem.n_vecs = n_vecs;
	backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;

free_backing_req:
	mempool_free(backing_req, &backing_dev->req_pool);
	return NULL;
}

struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
						     struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_alloc(backing_dev, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_alloc(backing_dev, opts);

	BUG();
}

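/*
 * Initialize a REQ-type request: trim the cloned bio to the
 * [req_off, req_off + len) sub-range of the upper request (both must
 * be sector aligned), point it at the matching offset on the backing
 * device, and hold a reference on the upper request until completion.
 */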
static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
			      struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct bio *clone;
	u32 off = opts->req.req_off;
	u32 len = opts->req.len;

	clone = &backing_req->bio;
	BUG_ON(off & SECTOR_MASK);
	BUG_ON(len & SECTOR_MASK);
	bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);

	clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
	clone->bi_private = backing_req;
	clone->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;

	pcache_req_get(pcache_req);
	backing_req->req.upper_req = pcache_req;
	backing_req->req.bio_off = off;
}

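/*
 * Initialize a KMEM-type request: build a bio over the preallocated
 * bio_vec array, map the kernel buffer into it, and point it at the
 * backing-device offset given in the options.
 */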
static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
			       struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	struct bio *backing_bio;

	bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
		 backing_req->kmem.n_vecs, opts->kmem.opf);

	backing_bio = &backing_req->bio;
	bio_map(backing_bio, opts->kmem.data, opts->kmem.len);

	backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
	backing_bio->bi_private = backing_req;
	backing_bio->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;
	backing_req->priv_data = opts->priv_data;
}

void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
			  struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_init(backing_req, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_init(backing_req, opts);

	BUG();
}

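/*
 * Allocate and initialize a request in one call.  A typical KMEM-type
 * use is writing a kernel buffer to a given backing-device offset; the
 * sketch below is illustrative only, with a hypothetical caller buffer
 * (buf, buf_len), offset (dev_off) and completion callback (my_end_fn):
 *
 *	struct pcache_backing_dev_req_opts opts = {
 *		.type = BACKING_DEV_REQ_TYPE_KMEM,
 *		.gfp_mask = GFP_NOIO,
 *		.end_fn = my_end_fn,
 *		.kmem = {
 *			.data = buf,
 *			.len = buf_len,
 *			.opf = REQ_OP_WRITE,
 *			.backing_off = dev_off,
 *		},
 *	};
 *	struct pcache_backing_dev_req *req;
 *
 *	req = backing_dev_req_create(backing_dev, &opts);
 *	if (req)
 *		backing_dev_req_submit(req, true);
 */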
struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
						      struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;

	backing_req = backing_dev_req_alloc(backing_dev, opts);
	if (!backing_req)
		return NULL;

	backing_dev_req_init(backing_req, opts);

	return backing_req;
}

void backing_dev_flush(struct pcache_backing_dev *backing_dev)
{
	blkdev_issue_flush(backing_dev->dm_dev->bdev);
}

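/*
 * Module-level slab setup.  The bvec slab is sized so one array can
 * map PCACHE_CACHE_SUBTREE_SIZE worth of pages, with one extra vector
 * presumably to cover a buffer whose start is not page aligned.
 */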
int pcache_backing_init(void)
{
	u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
	int ret;

	backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
	if (!backing_req_cache) {
		ret = -ENOMEM;
		goto err;
	}

	backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
					       max_bvecs * sizeof(struct bio_vec),
					       0, 0, NULL);
	if (!backing_bvec_cache) {
		ret = -ENOMEM;
		goto destroy_req_cache;
	}

	return 0;

destroy_req_cache:
	kmem_cache_destroy(backing_req_cache);
err:
	return ret;
}

void pcache_backing_exit(void)
{
	kmem_cache_destroy(backing_bvec_cache);
	kmem_cache_destroy(backing_req_cache);
}