
TOMOYO Linux Cross Reference
Linux/fs/bcachefs/buckets.h


Diff markup

Differences between /fs/bcachefs/buckets.h (Architecture i386) and /fs/bcachefs/buckets.h (Architecture sparc64): the two versions are identical, so the listing below shows the common contents of the header.


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
        return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
        return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
        u32 remainder;

        div_u64_rem(s, ca->mi.bucket_size, &remainder);
        return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
        return div_u64_rem(s, ca->mi.bucket_size, offset);
}

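/*
 * Illustrative note (not part of the original header): a worked example of
 * the conversions above, assuming a hypothetical device with
 * ca->mi.bucket_size == 1024 sectors:
 *
 *      sector_to_bucket(ca, 5000)   == 4      (5000 / 1024)
 *      bucket_to_sector(ca, 4)      == 4096   (4 * 1024)
 *      bucket_remainder(ca, 5000)   == 904    (5000 % 1024)
 *
 *      u32 offset;
 *      sector_to_bucket_and_offset(ca, 5000, &offset);  // returns 4, sets offset to 904
 */
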
#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

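/*
 * Illustrative sketch (not part of the original header): for_each_bucket()
 * walks the in-memory bucket array from first_bucket to nbuckets. A
 * hypothetical use, assuming "buckets" points at the bucket array type this
 * macro expects (something providing ->b, ->first_bucket and ->nbuckets):
 *
 *      struct bucket *b;
 *      size_t n = 0;
 *
 *      for_each_bucket(b, buckets)
 *              n++;
 */
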
/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock into a single byte, because that's what we have
 * left in struct bucket, and we care about the size of these: during fsck, we
 * need in-memory state for every single bucket on every device.
 *
 * We used to do
 *   while (xchg(&b->lock, 1)) cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in
 * the first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR       0
#else
#define BUCKET_LOCK_BITNR       (BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
        ulong   ulong;
        u8      byte;
};

static inline void bucket_unlock(struct bucket *b)
{
        BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

        clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
        wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
        wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
                         TASK_UNINTERRUPTIBLE);
}

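/*
 * Illustrative note (not part of the original header): b->lock is a single
 * byte, but the bit helpers used above (wait_on_bit_lock(), clear_bit_unlock(),
 * wake_up_bit()) operate on an unsigned long. On little endian, bit 0 lives in
 * the first byte of that word; on big endian, bit (BITS_PER_LONG - 1) does.
 * That is why BUCKET_LOCK_BITNR is chosen per endianness, and the
 * BUILD_BUG_ON() in bucket_unlock() verifies the choice at compile time.
 * Hedged usage sketch:
 *
 *      bucket_lock(b);
 *      // ... modify *b ...
 *      bucket_unlock(b);
 */
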
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
        return genradix_ptr(&ca->buckets_gc, b);
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
        return rcu_dereference_check(ca->bucket_gens,
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
                                     lockdep_is_held(&ca->fs->state_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
        struct bucket_gens *gens = bucket_gens(ca);

        if (b - gens->first_bucket >= gens->nbuckets_minus_first)
                return NULL;
        return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
        u8 *gen = bucket_gen(ca, b);
        return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
        rcu_read_lock();
        int ret = bucket_gen_get_rcu(ca, b);
        rcu_read_unlock();
        return ret;
}

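/*
 * Illustrative note (not part of the original header): the bounds check in
 * bucket_gen() relies on unsigned wraparound. With size_t arithmetic,
 * (b - gens->first_bucket) underflows to a huge value when b < first_bucket,
 * so a single comparison against nbuckets_minus_first rejects buckets both
 * below first_bucket and past the end of the device. With hypothetical
 * values first_bucket == 16 and nbuckets_minus_first == 1008:
 *
 *      bucket_gen(ca, 8)    -> NULL           (8 - 16 wraps to a huge value)
 *      bucket_gen(ca, 16)   -> &gens->b[16]
 *      bucket_gen(ca, 1024) -> NULL           (1024 - 16 == 1008, out of range)
 */
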
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
                                         const struct bch_extent_ptr *ptr)
{
        return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
                                                const struct bch_extent_ptr *ptr,
                                                u32 *bucket_offset)
{
        return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
                                           const struct bch_extent_ptr *ptr)
{
        return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
                                               const struct bch_extent_ptr *ptr)
{
        if (bkey_is_btree_ptr(k))
                return BCH_DATA_btree;

        return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
        EBUG_ON(sectors < 0);

        return crc_is_compressed(p.crc)
                ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
                                   p.crc.uncompressed_size)
                : sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}

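/*
 * Illustrative note (not part of the original header): bucket generations are
 * 8-bit counters that wrap, so they are compared via a signed difference
 * rather than directly. Casting (a - b) to s8 gives the shortest distance
 * around the 256-value circle:
 *
 *      gen_cmp(5, 250)   == 11         (5 is 11 steps ahead of 250, mod 256)
 *      gen_cmp(250, 5)   == -11
 *      gen_after(5, 250) == 11
 *      gen_after(250, 5) == 0          (250 is not after 5)
 */
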
static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
        int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
        return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
        rcu_read_lock();
        int ret = dev_ptr_stale_rcu(ca, ptr);
        rcu_read_unlock();
        return ret;
}

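/*
 * Illustrative note (not part of the original header): dev_ptr_stale()
 * returns how many generations the bucket has advanced past the pointer's
 * gen (0 means the pointer is still valid), or a negative value when the
 * bucket number is out of range. A hedged sketch that treats only a positive
 * result as stale:
 *
 *      if (dev_ptr_stale(ca, ptr) > 0)
 *              ;       // bucket was invalidated; its data may have been reused
 */
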
/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
        struct bch_dev_usage ret;

        bch2_dev_usage_read_fast(ca, &ret);
        return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
        s64 reserved = 0;

        switch (watermark) {
        case BCH_WATERMARK_NR:
                BUG();
        case BCH_WATERMARK_stripe:
                reserved += ca->mi.nbuckets >> 6;
                fallthrough;
        case BCH_WATERMARK_normal:
                reserved += ca->mi.nbuckets >> 6;
                fallthrough;
        case BCH_WATERMARK_copygc:
                reserved += ca->nr_btree_reserve;
                fallthrough;
        case BCH_WATERMARK_btree:
                reserved += ca->nr_btree_reserve;
                fallthrough;
        case BCH_WATERMARK_btree_copygc:
        case BCH_WATERMARK_reclaim:
        case BCH_WATERMARK_interior_updates:
                break;
        }

        return reserved;
}

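/*
 * Illustrative note (not part of the original header): the fallthrough chain
 * above means watermarks earlier in the list hold back strictly more buckets,
 * so only the more critical watermarks may allocate from the remaining
 * reserve. A worked example with hypothetical values ca->mi.nbuckets == 64000
 * (so nbuckets >> 6 == 1000) and ca->nr_btree_reserve == 100:
 *
 *      BCH_WATERMARK_stripe  -> 1000 + 1000 + 100 + 100 == 2200
 *      BCH_WATERMARK_normal  ->        1000 + 100 + 100 == 1200
 *      BCH_WATERMARK_copygc  ->               100 + 100 ==  200
 *      BCH_WATERMARK_btree   ->                     100 ==  100
 *      BCH_WATERMARK_reclaim ->                             0
 */
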
static inline u64 dev_buckets_free(struct bch_dev *ca,
                                   struct bch_dev_usage usage,
                                   enum bch_watermark watermark)
{
        return max_t(s64, 0,
                     usage.d[BCH_DATA_free].buckets -
                     ca->nr_open_buckets -
                     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage usage,
                                          enum bch_watermark watermark)
{
        return max_t(s64, 0,
                       usage.d[BCH_DATA_free].buckets
                     + usage.d[BCH_DATA_cached].buckets
                     + usage.d[BCH_DATA_need_gc_gens].buckets
                     + usage.d[BCH_DATA_need_discard].buckets
                     - ca->nr_open_buckets
                     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
                                        enum bch_watermark watermark)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

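/*
 * Illustrative note (not part of the original header): dev_buckets_free()
 * counts only buckets already in the free state, while
 * __dev_buckets_available() also counts buckets reclaimable without moving
 * data (cached, need_gc_gens, need_discard). With hypothetical numbers of
 * 500 free, 300 cached, 0 need_gc_gens and 50 need_discard buckets, 20 open
 * buckets and a 200-bucket reserve at the given watermark:
 *
 *      dev_buckets_free()        == 500 - 20 - 200 == 280
 *      __dev_buckets_available() == 850 - 20 - 200 == 630
 */
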
/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
        return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
                           struct bkey_s_c, const struct bch_extent_ptr *,
                           s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
                        enum btree_id, unsigned, struct bkey_s_c,
                        enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
                        struct bkey_s_c, struct bkey_s,
                        enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
                          struct bkey_s_c, struct bkey_s,
                          enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({                                                                                            \
        int ret = 0;                                                                          \
                                                                                              \
        if (_old.k->type)                                                                     \
                ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);   \
        if (!ret && _new.k->type)                                                             \
                ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
        ret;                                                                                  \
})

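/*
 * Illustrative sketch (not part of the original header): a trigger typically
 * uses trigger_run_overwrite_then_insert() to run one per-key helper first on
 * the old key (as an overwrite) and then on the new key (as an insert),
 * stopping on the first error. Hypothetical shape, with bch2_trigger_dummy()
 * and __trigger_dummy() standing in for a real trigger and its helper:
 *
 *      int bch2_trigger_dummy(struct btree_trans *trans, enum btree_id btree,
 *                             unsigned level, struct bkey_s_c old,
 *                             struct bkey_s new,
 *                             enum btree_iter_update_trigger_flags flags)
 *      {
 *              return trigger_run_overwrite_then_insert(__trigger_dummy, trans,
 *                                                       btree, level, old, new, flags);
 *      }
 */
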
void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
                                    enum bch_data_type, unsigned,
                                    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
                                    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
                                    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        u64 b_offset    = bucket_to_sector(ca, b);
        u64 b_end       = bucket_to_sector(ca, b + 1);
        unsigned i;

        if (!b)
                return true;

        for (i = 0; i < layout->nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout->sb_offset[i]);
                u64 end = offset + (1 << layout->sb_max_size_bits);

                if (!(offset >= b_end || end <= b_offset))
                        return true;
        }

        return false;
}

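/*
 * Illustrative note (not part of the original header): the loop above is a
 * standard interval-overlap test. A superblock copy occupying sectors
 * [offset, end) overlaps bucket b's sectors [b_offset, b_end) unless it lies
 * entirely before or entirely after the bucket, hence
 * !(offset >= b_end || end <= b_offset). Bucket 0 is always treated as a
 * superblock bucket (it holds, among other things, the fixed-offset
 * superblock layout sector).
 */
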
static inline const char *bch2_data_type_str(enum bch_data_type type)
{
        return type < BCH_DATA_NR
                ? __bch2_data_types[type]
                : "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors) {
                this_cpu_sub(*c->online_reserved, res->sectors);
                res->sectors = 0;
        }
}

enum bch_reservation_flags {
        BCH_DISK_RESERVATION_NOFAIL     = 1 << 0,
        BCH_DISK_RESERVATION_PARTIAL    = 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
                                u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                                            u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
        u64 old, new;

        old = this_cpu_read(c->pcpu->sectors_available);
        do {
                if (sectors > old)
                        return __bch2_disk_reservation_add(c, res, sectors, flags);

                new = old - sectors;
        } while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

        this_cpu_add(*c->online_reserved, sectors);
        res->sectors                    += sectors;
        return 0;
#else
        return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

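/*
 * Illustrative note (not part of the original header): in the kernel build,
 * the fast path above takes sectors out of a per-CPU pool
 * (c->pcpu->sectors_available) with a cmpxchg loop, so the common case never
 * touches a shared counter; only when the local pool is too small does it
 * fall back to the out-of-line __bch2_disk_reservation_add(), which can fail
 * when space runs out (BCH_DISK_RESERVATION_NOFAIL suppresses that failure).
 * Hedged usage sketch:
 *
 *      struct disk_reservation res = bch2_disk_reservation_init(c, 2);
 *      int ret = bch2_disk_reservation_add(c, &res, sectors * res.nr_replicas, 0);
 *
 *      if (ret)
 *              return ret;
 *      // ... do the write the reservation covers ...
 *      bch2_disk_reservation_put(c, &res);
 */
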
static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            u64 sectors, unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR  6

static inline u64 avail_factor(u64 r)
{
        return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

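/*
 * Illustrative note (not part of the original header): avail_factor() scales
 * capacity by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1) == 64/65, i.e. it
 * holds back roughly 1/65 (about 1.5%) of the space. Worked example:
 *
 *      avail_factor(6500) == (6500 << 6) / 65 == 416000 / 65 == 6400
 */
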
void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */
