LCOV - code coverage report
Current view: top level - dm-pcache - cache_segment.c (source / functions)
Test:      dm_pcache.info
Test Date: 2025-08-08 03:56:04

                 Coverage    Total    Hit
Lines:             91.0 %      166    151
Functions:        100.0 %       12     12

            Line data    Source code
       1              : // SPDX-License-Identifier: GPL-2.0-or-later
       2              : 
       3              : #include "cache_dev.h"
       4              : #include "cache.h"
       5              : #include "backing_dev.h"
       6              : #include "dm_pcache.h"
       7              : 
       8       148828 : static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
       9              : {
      10       148828 :         struct pcache_segment_info *seg_info_addr;
      11       148828 :         u32 seg_id = cache_seg->segment.seg_id;
      12       148828 :         void *seg_addr;
      13              : 
      14       148828 :         seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
      15       148828 :         seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;
      16              : 
      17       148828 :         return seg_info_addr;
      18              : }
      19              : 
      20       148828 : static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
      21              : {
      22       148828 :         struct pcache_segment_info *seg_info_addr;
      23       148828 :         struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;
      24              : 
      25       148828 :         mutex_lock(&cache_seg->info_lock);
      26       148828 :         seg_info->header.seq++;
      27       148828 :         seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));
      28              : 
      29       148828 :         seg_info_addr = get_seg_info_addr(cache_seg);
      30       148828 :         memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
      31       148828 :         pmem_wmb();
      32              : 
      33       148828 :         cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
      34       148828 :         mutex_unlock(&cache_seg->info_lock);
      35       148828 : }
      36              : 
      37        31999 : static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
      38              : {
      39        31999 :         struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
      40        31999 :         struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
      41        31999 :         struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
      42        31999 :         u32 seg_id = cache_seg->segment.seg_id;
      43        31999 :         int ret = 0;
      44              : 
      45        31999 :         cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
      46              : 
      47        31999 :         mutex_lock(&cache_seg->info_lock);
      48        63998 :         cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
      49              :                                                 sizeof(struct pcache_segment_info),
      50              :                                                 PCACHE_SEG_INFO_SIZE,
      51        31999 :                                                 &cache_seg->cache_seg_info);
      52        31999 :         if (IS_ERR(cache_seg_info_addr)) {
      53            0 :                 ret = PTR_ERR(cache_seg_info_addr);
      54            0 :                 goto out;
      55        31999 :         } else if (!cache_seg_info_addr) {
      56            0 :                 ret = -EIO;
      57            0 :                 goto out;
      58              :         }
      59        31999 :         cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
      60        31999 : out:
      61        31999 :         mutex_unlock(&cache_seg->info_lock);
      62              : 
      63        31999 :         if (ret)
      64            0 :                 pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
      65              :                               cache_seg->segment.seg_id, ret);
      66        31999 :         return ret;
      67              : }
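
Note: the segment info above (and the gen control block loaded below) is kept as PCACHE_META_INDEX_MAX on-media copies, each stamped with a sequence number and a CRC. pcache_meta_find_latest() returns the newest copy whose CRC verifies (and also copies it into the caller-provided buffer, which the sketch omits), while the write paths bump the sequence, recompute the CRC, and rotate to the next slot. The following is a minimal sketch of that selection idea only; the sketch_meta_header layout and the crc_fn callback are assumptions for this example, not the driver's actual pcache_meta_find_latest() implementation.

        /*
         * Minimal sketch of "pick the newest valid metadata copy".  The header
         * layout and the crc_fn callback are assumed for this example; the
         * driver's real header type and CRC helper may differ.
         */
        struct sketch_meta_header {
                u64 seq;        /* bumped on every update */
                u32 crc;        /* covers the copy, excluding this field */
        };

        static void *meta_find_latest_sketch(void *base, u32 meta_size,
                                             u32 slot_size, u32 nr_slots,
                                             u32 (*crc_fn)(void *copy, u32 size))
        {
                struct sketch_meta_header *latest = NULL;
                u32 i;

                for (i = 0; i < nr_slots; i++) {
                        struct sketch_meta_header *hdr = base + (size_t)i * slot_size;

                        if (hdr->crc != crc_fn(hdr, meta_size))
                                continue;       /* torn or never-written copy */
                        if (!latest || hdr->seq > latest->seq)
                                latest = hdr;   /* highest valid seq wins */
                }

                return latest;                  /* NULL when no valid copy exists */
        }

cache_seg_info_write() above is the mirror image of this read side: it bumps header.seq, recomputes header.crc, flushes the copy into the slot at info_index with memcpy_flushcache(), and advances info_index modulo PCACHE_META_INDEX_MAX.
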
      68              : 
      69        31999 : static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
      70              : {
      71        31999 :         struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
      72        31999 :         struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
      73        31999 :         int ret = 0;
      74              : 
      75        31999 :         mutex_lock(&cache_seg->ctrl_lock);
      76        31999 :         cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
      77              :                                              sizeof(struct pcache_cache_seg_gen),
      78              :                                              sizeof(struct pcache_cache_seg_gen),
      79              :                                              &cache_seg_gen);
      80        31999 :         if (IS_ERR(cache_seg_gen_addr)) {
      81            0 :                 ret = PTR_ERR(cache_seg_gen_addr);
      82            0 :                 goto out;
      83              :         }
      84              : 
      85        31999 :         if (!cache_seg_gen_addr) {
      86            0 :                 cache_seg->gen = 0;
      87            0 :                 cache_seg->gen_seq = 0;
      88            0 :                 cache_seg->gen_index = 0;
      89            0 :                 goto out;
      90              :         }
      91              : 
      92        31999 :         cache_seg->gen = cache_seg_gen.gen;
      93        31999 :         cache_seg->gen_seq = cache_seg_gen.header.seq;
      94        31999 :         cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
      95        31999 : out:
      96        31999 :         mutex_unlock(&cache_seg->ctrl_lock);
      97              : 
      98        31999 :         return ret;
      99              : }
     100              : 
     101       146505 : static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
     102              : {
     103       146505 :         struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
     104              : 
     105       146505 :         return (cache_seg_ctrl->gen + cache_seg->gen_index);
     106              : }
     107              : 
     108       146505 : static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
     109              : {
     110       146505 :         struct pcache_cache_seg_gen cache_seg_gen;
     111              : 
     112       146505 :         mutex_lock(&cache_seg->ctrl_lock);
     113       146505 :         cache_seg_gen.gen = cache_seg->gen;
     114       146505 :         cache_seg_gen.header.seq = ++cache_seg->gen_seq;
     115       146505 :         cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
     116              :                                                  sizeof(struct pcache_cache_seg_gen));
     117              : 
     118       146505 :         memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
     119       146505 :         pmem_wmb();
     120              : 
     121       146503 :         cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
     122       146503 :         mutex_unlock(&cache_seg->ctrl_lock);
     123       146505 : }
     124              : 
     125        74548 : static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
     126              : {
     127        74548 :         cache_seg->gen = 0;
     128        74548 :         cache_seg->gen_seq = 0;
     129        74548 :         cache_seg->gen_index = 0;
     130        74548 :         cache_seg_ctrl_write(cache_seg);
     131              : }
     132              : 
     133        31999 : static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
     134              : {
     135        31999 :         int ret;
     136              : 
     137        31999 :         ret = cache_seg_info_load(cache_seg);
     138        31999 :         if (ret)
     139            0 :                 goto err;
     140              : 
     141        31999 :         ret = cache_seg_ctrl_load(cache_seg);
     142        31999 :         if (ret)
     143              :                 goto err;
     144              : 
     145              :         return 0;
     146              : err:
     147              :         return ret;
     148              : }
     149              : 
     150              : /**
     151              :  * cache_seg_set_next_seg - Sets the ID of the next segment
     152              :  * @cache_seg: Pointer to the cache segment structure.
     153              :  * @seg_id: The segment ID to set as the next segment.
     154              :  *
     155              :  * A pcache_cache allocates multiple cache segments, which are linked together
     156              :  * through next_seg. When loading a pcache_cache, the first cache segment can
     157              :  * be found using cache->seg_id, which allows access to all the cache segments.
     158              :  */
     159        74280 : void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
     160              : {
     161        74280 :         cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
     162        74280 :         cache_seg->cache_seg_info.next_seg = seg_id;
     163        74280 :         cache_seg_info_write(cache_seg);
     164        74280 : }
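
As the comment above describes, cache segments form a chain through next_seg, starting from cache->seg_id. A hypothetical loader that walks this chain might look like the sketch below; the helper name and loop bookkeeping are illustrative assumptions, not the driver's actual load path.

        /*
         * Hypothetical sketch of walking the segment chain at load time.  Only
         * cache_seg_init(), the PCACHE_SEG_INFO_FLAGS_HAS_NEXT flag and the
         * next_seg field are taken from the code above; everything else is
         * assumed for illustration.
         */
        static int cache_segs_load_sketch(struct pcache_cache *cache)
        {
                u32 seg_id = cache->seg_id;     /* first on-media segment */
                u32 i = 0;
                int ret;

                while (true) {
                        ret = cache_seg_init(cache, seg_id, i, false);
                        if (ret)
                                return ret;

                        if (!(cache->segments[i].cache_seg_info.flags &
                              PCACHE_SEG_INFO_FLAGS_HAS_NEXT))
                                break;          /* tail of the chain reached */

                        seg_id = cache->segments[i].cache_seg_info.next_seg;
                        i++;
                }

                return 0;
        }
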
     165              : 
     166       106547 : int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
     167              :                    bool new_cache)
     168              : {
     169       106547 :         struct pcache_cache_dev *cache_dev = cache->cache_dev;
     170       106547 :         struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
     171       106547 :         struct pcache_segment_init_options seg_options = { 0 };
     172       106547 :         struct pcache_segment *segment = &cache_seg->segment;
     173       106547 :         int ret;
     174              : 
     175       106547 :         cache_seg->cache = cache;
     176       106547 :         cache_seg->cache_seg_id = cache_seg_id;
     177       106547 :         spin_lock_init(&cache_seg->gen_lock);
     178       106547 :         atomic_set(&cache_seg->refs, 0);
     179       106547 :         mutex_init(&cache_seg->info_lock);
     180       106547 :         mutex_init(&cache_seg->ctrl_lock);
     181              : 
     182              :         /* init pcache_segment */
     183       106547 :         seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
     184       106547 :         seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
     185       106547 :         seg_options.seg_id = seg_id;
     186       106547 :         seg_options.seg_info = &cache_seg->cache_seg_info;
     187       106547 :         pcache_segment_init(cache_dev, segment, &seg_options);
     188              : 
     189       106547 :         cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;
     190              : 
     191       106547 :         if (new_cache) {
     192        74548 :                 cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
     193              :                                      PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
     194              :                                      PCACHE_CACHE_SEG_CTRL_SIZE);
     195              : 
     196        74548 :                 cache_seg_ctrl_init(cache_seg);
     197              : 
     198        74548 :                 cache_seg->info_index = 0;
     199        74548 :                 cache_seg_info_write(cache_seg);
     200              : 
     201              :                 /* clear outdated kset in segment */
     202        74548 :                 memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
     203        74548 :                 pmem_wmb();
     204              :         } else {
     205        31999 :                 ret = cache_seg_meta_load(cache_seg);
     206        31999 :                 if (ret)
     207            0 :                         goto err;
     208              :         }
     209              : 
     210              :         return 0;
     211            0 : err:
     212            0 :         return ret;
     213              : }
     214              : 
     215              : /**
     216              :  * get_cache_segment - Retrieves a free cache segment from the cache.
     217              :  * @cache: Pointer to the cache structure.
     218              :  *
     219              :  * This function attempts to find a free cache segment that can be used.
     220              :  * It locks the segment map and checks for the next available segment ID.
     221              :  * If a free segment is found, it initializes it and returns a pointer to the
     222              :  * cache segment structure. Returns NULL if no segments are available.
     223              :  */
     224      9959387 : struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
     225              : {
     226      9959387 :         struct pcache_cache_segment *cache_seg;
     227      9959387 :         u32 seg_id;
     228              : 
     229      9959387 :         spin_lock(&cache->seg_map_lock);
     230      9965891 : again:
     231      9965891 :         seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
     232      9965891 :         if (seg_id == cache->n_segs) {
     233              :                 /* reset the hint of ->last_cache_seg and retry */
     234      9886601 :                 if (cache->last_cache_seg) {
     235         6318 :                         cache->last_cache_seg = 0;
     236         6318 :                         goto again;
     237              :                 }
     238      9880283 :                 cache->cache_full = true;
     239      9880283 :                 spin_unlock(&cache->seg_map_lock);
     240      9880283 :                 return NULL;
     241              :         }
     242              : 
     243              :         /*
     244              :          * found an available cache_seg, mark it used in seg_map
     245              :          * and update the search hint ->last_cache_seg
     246              :          */
     247        79290 :         __set_bit(seg_id, cache->seg_map);
     248        79290 :         cache->last_cache_seg = seg_id;
     249        79290 :         spin_unlock(&cache->seg_map_lock);
     250              : 
     251        79290 :         cache_seg = &cache->segments[seg_id];
     252        79290 :         cache_seg->cache_seg_id = seg_id;
     253              : 
     254        79290 :         return cache_seg;
     255              : }
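
The lookup above scans seg_map with find_next_zero_bit(), starting from the last_cache_seg hint and wrapping around once before declaring the cache full. A hypothetical caller that rolls over to a fresh segment and records the chain linkage could look like the sketch below; the rollover helper itself is an assumption for illustration, and only get_cache_segment() and cache_seg_set_next_seg() are taken from this file.

        /*
         * Hypothetical caller sketch: when the current segment fills up, grab
         * the next free segment and chain it behind the previous one so the
         * chain can be rebuilt after a restart.  This helper is not part of
         * the driver.
         */
        static struct pcache_cache_segment *
        cache_seg_rollover_sketch(struct pcache_cache *cache,
                                  struct pcache_cache_segment *prev_seg)
        {
                struct pcache_cache_segment *next_seg;

                next_seg = get_cache_segment(cache);
                if (!next_seg)
                        return NULL;    /* cache full: caller must wait or write around */

                cache_seg_set_next_seg(prev_seg, next_seg->segment.seg_id);

                return next_seg;
        }
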
     256              : 
     257        71957 : static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
     258              : {
     259        71957 :         spin_lock(&cache_seg->gen_lock);
     260        71957 :         cache_seg->gen++;
     261        71957 :         spin_unlock(&cache_seg->gen_lock);
     262              : 
     263        71957 :         cache_seg_ctrl_write(cache_seg);
     264        71957 : }
     265              : 
     266     57290146 : void cache_seg_get(struct pcache_cache_segment *cache_seg)
     267              : {
     268     57290146 :         atomic_inc(&cache_seg->refs);
     269     57815829 : }
     270              : 
     271        71957 : static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
     272              : {
     273        71957 :         struct pcache_cache *cache;
     274              : 
     275        71957 :         cache = cache_seg->cache;
     276        71957 :         cache_seg_gen_increase(cache_seg);
     277              : 
     278        71957 :         spin_lock(&cache->seg_map_lock);
     279        71957 :         if (cache->cache_full)
     280         5379 :                 cache->cache_full = false;
     281        71957 :         __clear_bit(cache_seg->cache_seg_id, cache->seg_map);
     282        71957 :         spin_unlock(&cache->seg_map_lock);
     283              : 
     284        71957 :         pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
      285              :         /* clean_work will clean the bad keys in key_tree */
     286        71957 :         queue_work(cache_get_wq(cache), &cache->clean_work);
     287        71957 : }
     288              : 
     289     34229840 : void cache_seg_put(struct pcache_cache_segment *cache_seg)
     290              : {
     291     34229840 :         if (atomic_dec_and_test(&cache_seg->refs))
     292        71957 :                 cache_seg_invalidate(cache_seg);
     293     34238632 : }
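
cache_seg_get() and cache_seg_put() implement the segment lifecycle: every user of a segment holds a reference, and when the last reference is dropped, cache_seg_invalidate() bumps the generation, clears the segment's bit in seg_map, kicks deferred requests, and queues clean_work. The pairing below is a hypothetical usage sketch; sketch_cache_key and its cache_seg member are invented for the example and are not the driver's definitions.

        /*
         * Hypothetical usage sketch of the reference-counting pattern.  In the
         * real driver, whatever object stores data in a segment would take the
         * reference; sketch_cache_key stands in for such an object here.
         */
        struct sketch_cache_key {
                struct pcache_cache_segment *cache_seg;
        };

        static void sketch_key_link_seg(struct sketch_cache_key *key,
                                        struct pcache_cache_segment *cache_seg)
        {
                key->cache_seg = cache_seg;
                cache_seg_get(cache_seg);       /* one reference per live user */
        }

        static void sketch_key_unlink_seg(struct sketch_cache_key *key)
        {
                /* the final put triggers cache_seg_invalidate() */
                cache_seg_put(key->cache_seg);
                key->cache_seg = NULL;
        }
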

Generated by: LCOV version 2.0-1