bcachefs: btree cache counters should be size_t
author     Kent Overstreet <kent.overstreet@linux.dev>
           Thu, 5 Sep 2024 23:25:01 +0000 (19:25 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sat, 21 Sep 2024 15:39:48 +0000 (11:39 -0400)
32 bits won't overflow any time soon, but size_t is the correct type for
counting objects in memory.
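
Illustrative only, not part of the patch: a minimal userspace C sketch of
the pattern this commit applies, with size_t for counts of in-memory
objects, a long-sized atomic for the dirty counter, and the matching
%zu/%ld format specifiers. The names (demo_cache, demo_can_free) are
hypothetical, and C11 stdatomic stands in for the kernel's atomic_long_t
API.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct demo_cache {
            size_t      nr_used;    /* objects in memory: cf. bc->nr_used */
            size_t      nr_reserve; /* cf. bc->nr_reserve */
            atomic_long nr_dirty;   /* cf. the kernel's atomic_long_t nr_dirty */
    };

    static size_t demo_can_free(const struct demo_cache *bc)
    {
            /* size_t is unsigned, so guard the subtraction against underflow */
            return bc->nr_used > bc->nr_reserve ? bc->nr_used - bc->nr_reserve : 0;
    }

    int main(void)
    {
            struct demo_cache bc = { .nr_used = 20, .nr_reserve = 16 };

            atomic_fetch_add(&bc.nr_dirty, 1);      /* cf. atomic_long_inc() */

            printf("can free: %zu, dirty: %ld\n",
                   demo_can_free(&bc), atomic_load(&bc.nr_dirty));
            return 0;
    }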

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/btree_types.h
fs/bcachefs/journal_reclaim.c
fs/bcachefs/super.c

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 5853018226927c33f634d81551890939f8917637..a7eb07d6e7f94efc87544633a5fec5af980c33c3 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -32,24 +32,24 @@ const char * const bch2_btree_node_flags[] = {
 
 void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
-       unsigned i, reserve = 16;
+       unsigned reserve = 16;
 
        if (!c->btree_roots_known[0].b)
                reserve += 8;
 
-       for (i = 0; i < btree_id_nr_alive(c); i++) {
+       for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
 
                if (r->b)
                        reserve += min_t(unsigned, 1, r->b->c.level) * 8;
        }
 
-       c->btree_cache.reserve = reserve;
+       c->btree_cache.nr_reserve = reserve;
 }
 
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache *bc)
 {
-       return max_t(int, 0, bc->used - bc->reserve);
+       return max_t(int, 0, bc->nr_used - bc->nr_reserve);
 }
 
 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -87,7 +87,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 #endif
        b->aux_data = NULL;
 
-       bc->used--;
+       bc->nr_used--;
 
        btree_node_to_freedlist(bc, b);
 }
@@ -167,7 +167,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 
        bch2_btree_lock_init(&b->c, 0);
 
-       bc->used++;
+       bc->nr_used++;
        list_add(&b->list, &bc->freeable);
        return b;
 }
@@ -194,7 +194,7 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
        b->hash_val = 0;
 
        if (b->c.btree_id < BTREE_ID_NR)
-               --bc->used_by_btree[b->c.btree_id];
+               --bc->nr_by_btree[b->c.btree_id];
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -205,7 +205,7 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
        int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
                                                bch_btree_cache_params);
        if (!ret && b->c.btree_id < BTREE_ID_NR)
-               bc->used_by_btree[b->c.btree_id]++;
+               bc->nr_by_btree[b->c.btree_id]++;
        return ret;
 }
 
@@ -401,8 +401,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
        unsigned long touched = 0;
        unsigned i, flags;
        unsigned long ret = SHRINK_STOP;
-       bool trigger_writes = atomic_read(&bc->dirty) + nr >=
-               bc->used * 3 / 4;
+       bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >=
+               bc->nr_used * 3 / 4;
 
        if (bch2_btree_shrinker_disabled)
                return SHRINK_STOP;
@@ -439,7 +439,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
                        six_unlock_write(&b->c.lock);
                        six_unlock_intent(&b->c.lock);
                        freed++;
-                       bc->freed++;
+                       bc->nr_freed++;
                }
        }
 restart:
@@ -453,7 +453,7 @@ restart:
                } else if (!btree_node_reclaim(c, b, true)) {
                        freed++;
                        btree_node_data_free(c, b);
-                       bc->freed++;
+                       bc->nr_freed++;
 
                        bch2_btree_node_hash_remove(bc, b);
                        six_unlock_write(&b->c.lock);
@@ -539,7 +539,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        }
 
        BUG_ON(!bch2_journal_error(&c->journal) &&
-              atomic_read(&c->btree_cache.dirty));
+              atomic_long_read(&c->btree_cache.nr_dirty));
 
        list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
 
@@ -572,7 +572,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        bch2_recalc_btree_reserve(c);
 
-       for (i = 0; i < bc->reserve; i++)
+       for (i = 0; i < bc->nr_reserve; i++)
                if (!__bch2_btree_node_mem_alloc(c))
                        goto err;
 
@@ -739,7 +739,7 @@ got_node:
        }
 
        mutex_lock(&bc->lock);
-       bc->used++;
+       bc->nr_used++;
 got_mem:
        mutex_unlock(&bc->lock);
 
@@ -1353,11 +1353,11 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
 }
 
 static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
-                                const char *label, unsigned nr)
+                                const char *label, size_t nr)
 {
        prt_printf(out, "%s\t", label);
        prt_human_readable_u64(out, nr * c->opts.btree_node_size);
-       prt_printf(out, " (%u)\n", nr);
+       prt_printf(out, " (%zu)\n", nr);
 }
 
 static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
@@ -1374,16 +1374,16 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 32);
 
-       prt_btree_cache_line(out, c, "total:",          bc->used);
-       prt_btree_cache_line(out, c, "nr dirty:",       atomic_read(&bc->dirty));
+       prt_btree_cache_line(out, c, "total:",          bc->nr_used);
+       prt_btree_cache_line(out, c, "nr dirty:",       atomic_long_read(&bc->nr_dirty));
        prt_printf(out, "cannibalize lock:\t%p\n",      bc->alloc_lock);
        prt_newline(out);
 
-       for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
-               prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+       for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+               prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
 
        prt_newline(out);
-       prt_printf(out, "freed:\t%u\n", bc->freed);
+       prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
        prt_printf(out, "not freed:\n");
 
        for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 18cff98650ded7ae327450d2790d3f51510cef59..aad89ba16b9bd75d85db1b5b9562f1015f4da4d4 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -2031,7 +2031,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 do_write:
        BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
 
-       atomic_dec(&c->btree_cache.dirty);
+       atomic_long_dec(&c->btree_cache.nr_dirty);
 
        BUG_ON(btree_node_fake(b));
        BUG_ON((b->will_make_reachable != 0) != !b->written);
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 63d76f5c640318494e83a8e152f67f42801980c8..9b01ca3de90776b2838bcf8db656960e04a5d65c 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -18,13 +18,13 @@ struct btree_node_read_all;
 static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
        if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
-               atomic_inc(&c->btree_cache.dirty);
+               atomic_long_inc(&c->btree_cache.nr_dirty);
 }
 
 static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
        if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
-               atomic_dec(&c->btree_cache.dirty);
+               atomic_long_dec(&c->btree_cache.nr_dirty);
 }
 
 static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index c1ab824e1c34352e9d96678861b16861aa4e040d..806d27b7f41b027ddeda117203a0937cc9e8c2ab 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -180,15 +180,16 @@ struct btree_cache {
        struct list_head        freed_nonpcpu;
 
        /* Number of elements in live + freeable lists */
-       unsigned                used;
-       unsigned                reserve;
-       unsigned                freed;
-       atomic_t                dirty;
+       size_t                  nr_used;
+       size_t                  nr_reserve;
+       size_t                  nr_by_btree[BTREE_ID_NR];
+       atomic_long_t           nr_dirty;
+
+       /* shrinker stats */
+       size_t                  nr_freed;
        u64                     not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
        struct shrinker         *shrink;
 
-       unsigned                used_by_btree[BTREE_ID_NR];
-
        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 70b998d9f19ce4e584064e4ba7812c0a7004b9a3..9794b6d214cdd80868709dbf0370c34216872b15 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -681,7 +681,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
                if (j->watermark != BCH_WATERMARK_stripe)
                        min_nr = 1;
 
-               if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+               if (atomic_long_read(&c->btree_cache.nr_dirty) * 2 > c->btree_cache.nr_used)
                        min_nr = 1;
 
                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
@@ -689,8 +689,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
                trace_and_count(c, journal_reclaim_start, c,
                                direct, kicked,
                                min_nr, min_key_cache,
-                               atomic_read(&c->btree_cache.dirty),
-                               c->btree_cache.used,
+                               atomic_long_read(&c->btree_cache.nr_dirty),
+                               c->btree_cache.nr_used,
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));
 
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 1c949389d88fac516059eb69ea4d4bb972108bb1..873e4be7e1dc0c0e427c35eec1e1b8e66fe1cb17 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -370,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
            test_bit(BCH_FS_clean_shutdown, &c->flags) &&
            c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
                BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
-               BUG_ON(atomic_read(&c->btree_cache.dirty));
+               BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
                BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
                BUG_ON(c->btree_write_buffer.inc.keys.nr);
                BUG_ON(c->btree_write_buffer.flushing.keys.nr);