Diffstat (limited to 'drivers/md/bcache/bcache.h')
 drivers/md/bcache/bcache.h | 56 ++++++++++++++++----------------------------------------
 1 file changed, 16 insertions(+), 40 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index a4c7306ff43..82c9c5d3525 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -195,9 +195,7 @@ struct bucket {
 	atomic_t	pin;
 	uint16_t	prio;
 	uint8_t		gen;
-	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
-	uint8_t		gc_gen;
 	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
@@ -207,9 +205,9 @@ struct bucket {
  */
 
 BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
-#define GC_MARK_RECLAIMABLE	0
-#define GC_MARK_DIRTY		1
-#define GC_MARK_METADATA	2
+#define GC_MARK_RECLAIMABLE	1
+#define GC_MARK_DIRTY		2
+#define GC_MARK_METADATA	3
 #define GC_SECTORS_USED_SIZE	13
 #define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
@@ -426,14 +424,9 @@ struct cache {
 	 * their new gen to disk. After prio_write() finishes writing the new
 	 * gens/prios, they'll be moved to the free list (and possibly discarded
 	 * in the process)
-	 *
-	 * unused: GC found nothing pointing into these buckets (possibly
-	 * because all the data they contained was overwritten), so we only
-	 * need to discard them before they can be moved to the free list.
 	 */
 	DECLARE_FIFO(long, free)[RESERVE_NR];
 	DECLARE_FIFO(long, free_inc);
-	DECLARE_FIFO(long, unused);
 
 	size_t			fifo_last_bucket;
 
@@ -443,12 +436,6 @@ struct cache {
 	DECLARE_HEAP(struct bucket *, heap);
 
 	/*
-	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
-	 * call prio_write() to keep gens from wrapping.
-	 */
-	uint8_t			need_save_prio;
-
-	/*
 	 * If nonzero, we know we aren't going to find any buckets to invalidate
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
@@ -562,19 +549,16 @@ struct cache_set {
 	struct list_head	btree_cache_freed;
 
 	/* Number of elements in btree_cache + btree_cache_freeable lists */
-	unsigned		bucket_cache_used;
+	unsigned		btree_cache_used;
 
 	/*
 	 * If we need to allocate memory for a new btree node and that
 	 * allocation fails, we can cannibalize another node in the btree cache
-	 * to satisfy the allocation. However, only one thread can be doing this
-	 * at a time, for obvious reasons - try_harder and try_wait are
-	 * basically a lock for this that we can wait on asynchronously. The
-	 * btree_root() macro releases the lock when it returns.
+	 * to satisfy the allocation - lock to guarantee only one thread does
+	 * this at a time:
 	 */
-	struct task_struct	*try_harder;
-	wait_queue_head_t	try_wait;
-	uint64_t		try_harder_start;
+	wait_queue_head_t	btree_cache_wait;
+	struct task_struct	*btree_cache_alloc_lock;
 
 	/*
 	 * When we free a btree node, we increment the gen of the bucket the
@@ -603,7 +587,7 @@ struct cache_set {
 	uint16_t		min_prio;
 
 	/*
-	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
+	 * max(gen - last_gc) for all buckets. When it gets too big we have to gc
 	 * to keep gens from wrapping around.
 	 */
 	uint8_t			need_gc;
@@ -628,6 +612,8 @@ struct cache_set {
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
 
+	struct workqueue_struct	*moving_gc_wq;
+
 	struct btree		*root;
 
 #ifdef CONFIG_BCACHE_DEBUG
@@ -667,7 +653,6 @@ struct cache_set {
 	struct time_stats	btree_gc_time;
 	struct time_stats	btree_split_time;
 	struct time_stats	btree_read_time;
-	struct time_stats	try_harder_time;
 
 	atomic_long_t		cache_read_races;
 	atomic_long_t		writeback_keys_done;
@@ -850,9 +835,6 @@ static inline bool cached_dev_get(struct cached_dev *dc)
 /*
  * bucket_gc_gen() returns the difference between the bucket's current gen and
  * the oldest gen of any pointer into that bucket in the btree (last_gc).
- *
- * bucket_disk_gen() returns the difference between the current gen and the gen
- * on disk; they're both used to make sure gens don't wrap around.
  */
 
 static inline uint8_t bucket_gc_gen(struct bucket *b)
@@ -860,13 +842,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
 	return b->gen - b->last_gc;
 }
 
-static inline uint8_t bucket_disk_gen(struct bucket *b)
-{
-	return b->gen - b->disk_gen;
-}
-
 #define BUCKET_GC_GEN_MAX	96U
-#define BUCKET_DISK_GEN_MAX	64U
 
 #define kobj_attribute_write(n, fn)					\
 	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
@@ -899,11 +875,14 @@ void bch_submit_bbio(struct bio *, struct cache_set *,
 		     struct bkey *, unsigned);
 
 uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 
-bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, bool);
+bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
+void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
+
+void __bch_bucket_free(struct cache *, struct bucket *);
 void bch_bucket_free(struct cache_set *, struct bkey *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
 			   struct bkey *, int, bool);
 int bch_bucket_alloc_set(struct cache_set *, unsigned,
@@ -954,13 +933,10 @@ int bch_open_buckets_alloc(struct cache_set *);
 void bch_open_buckets_free(struct cache_set *);
 
 int bch_cache_allocator_start(struct cache *ca);
-int bch_cache_allocator_init(struct cache *ca);
 
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
 void bch_request_exit(void);
 int bch_request_init(void);
-void bch_btree_exit(void);
-int bch_btree_init(void);
 
 #endif /* _BCACHE_H */
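
A note on the GC_MARK renumbering above: shifting RECLAIMABLE/DIRTY/METADATA from 0/1/2 to 1/2/3 frees the value 0 to mean "not yet marked by gc", so a freshly zeroed bucket is no longer indistinguishable from a reclaimable one. The mark sits in the low 2 bits of the 16-bit gc_mark word, with GC_SECTORS_USED packed into the next 13 bits. Below is a minimal userspace sketch of how such shift-and-mask accessors behave; the BITMASK() body is an assumption modeled on bcache's macro, which this diff does not show:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed expansion of bcache's BITMASK(): generates a getter and a
 * SET_ helper that read/write `size` bits at `offset` within `field`. */
#define BITMASK(name, type, field, offset, size)		\
static inline uint64_t name(const type *k)			\
{ return (k->field >> (offset)) & ~(~0ULL << (size)); }	\
								\
static inline void SET_##name(type *k, uint64_t v)		\
{								\
	k->field &= ~(~(~0ULL << (size)) << (offset));		\
	k->field |= (v & ~(~0ULL << (size))) << (offset);	\
}

/* Trimmed-down bucket: only the shared bitfield word is kept here. */
struct bucket {
	uint16_t	gc_mark;
};

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);

int main(void)
{
	struct bucket b = { 0 };

	/* After the renumbering, a zeroed bucket reads back mark 0,
	 * which no longer collides with GC_MARK_RECLAIMABLE. */
	printf("fresh mark = %llu\n", (unsigned long long)GC_MARK(&b));

	SET_GC_MARK(&b, GC_MARK_DIRTY);
	SET_GC_SECTORS_USED(&b, 1000);

	/* Both values coexist in the single 16-bit gc_mark word. */
	printf("mark = %llu, sectors used = %llu\n",
	       (unsigned long long)GC_MARK(&b),
	       (unsigned long long)GC_SECTORS_USED(&b));
	return 0;
}
```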
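The rewritten comment in struct cache_set collapses try_harder/try_wait into a plain lock pair: btree_cache_alloc_lock records which task currently owns the right to cannibalize btree cache nodes, and btree_cache_wait queues everyone else. Here is a hedged userspace analogue of that pattern using pthreads in place of the kernel's task_struct pointer and wait queue; all names are illustrative, and the real locking code lives in btree.c, which is not part of this diff:

```c
#include <pthread.h>
#include <stdio.h>

/* The pthread_t owner plays btree_cache_alloc_lock; the condition
 * variable plays btree_cache_wait. */
struct cannibalize_lock {
	pthread_mutex_t	mutex;
	pthread_cond_t	wait;
	pthread_t	owner;
	int		held;
};

static struct cannibalize_lock l = {
	.mutex	= PTHREAD_MUTEX_INITIALIZER,
	.wait	= PTHREAD_COND_INITIALIZER,
};

static void cannibalize_lock_acquire(void)
{
	pthread_mutex_lock(&l.mutex);
	/* The current owner may re-enter; everyone else sleeps until the
	 * lock is released - guaranteeing only one cannibalizer at a time. */
	while (l.held && !pthread_equal(l.owner, pthread_self()))
		pthread_cond_wait(&l.wait, &l.mutex);
	l.owner = pthread_self();
	l.held = 1;
	pthread_mutex_unlock(&l.mutex);
}

static void cannibalize_lock_release(void)
{
	pthread_mutex_lock(&l.mutex);
	l.held = 0;
	pthread_cond_broadcast(&l.wait);	/* wake all waiting threads */
	pthread_mutex_unlock(&l.mutex);
}

int main(void)
{
	cannibalize_lock_acquire();
	printf("this thread may cannibalize btree cache nodes\n");
	cannibalize_lock_release();
	return 0;
}
```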
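Finally, with bucket_disk_gen() and disk_gen gone, bucket_gc_gen() is the one remaining wrap-around guard. Its body (shown in the diff) relies on uint8_t subtraction being modulo 256, so gen - last_gc stays meaningful even after gen wraps past 255; once the distance approaches BUCKET_GC_GEN_MAX (96U), gc must run before the 8-bit distance itself could wrap. A standalone demonstration, with the struct trimmed to the two fields involved and an illustrative main():

```c
#include <stdint.h>
#include <stdio.h>

/* Only the two gen-tracking fields from struct bucket are kept here. */
struct bucket {
	uint8_t	gen;		/* bumped each time the bucket is invalidated */
	uint8_t	last_gc;	/* most out-of-date gen in the btree */
};

#define BUCKET_GC_GEN_MAX	96U

/* Same body as in the header: modular uint8_t arithmetic keeps the
 * distance correct across wrap-around. */
static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

int main(void)
{
	struct bucket b = { .gen = 3, .last_gc = 250 };

	/* 3 - 250 wraps to 9: gen has moved 9 steps past last_gc,
	 * even though the raw values straddle the 255 -> 0 boundary. */
	printf("gc_gen = %u\n", (unsigned)bucket_gc_gen(&b));

	if (bucket_gc_gen(&b) > BUCKET_GC_GEN_MAX / 2)
		printf("distance growing - gc would be wanted soon\n");
	return 0;
}
```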