bdi: reimplement bdev_inode_switch_bdi()
A block_device may be attached to different gendisks and thus different bdis over time. bdev_inode_switch_bdi() is used to switch the associated bdi. The function assumes that the inode could be dirty and transfers it between bdis if so. This is a bit nasty in that it reaches into bdi internals.

This patch reimplements the function so that it writes out the inode if dirty. This is a lot simpler and can be implemented without exposing bdi internals.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent
1a1e4530ea
commit
018a17bdc8
3 changed files with 12 additions and 23 deletions
|
@@ -50,32 +50,22 @@ inline struct block_device *I_BDEV(struct inode *inode)
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * Move the inode from its current bdi to a new bdi. If the inode is dirty we
- * need to move it onto the dirty list of @dst so that the inode is always on
- * the right list.
+ * Move the inode from its current bdi to a new bdi. Make sure the inode
+ * is clean before moving so that it doesn't linger on the old bdi.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
-	struct backing_dev_info *old = inode->i_data.backing_dev_info;
-	bool wakeup_bdi = false;
-
-	if (unlikely(dst == old))		/* deadlock avoidance */
-		return;
-	bdi_lock_two(&old->wb, &dst->wb);
-	spin_lock(&inode->i_lock);
-	inode->i_data.backing_dev_info = dst;
-	if (inode->i_state & I_DIRTY) {
-		if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
-			wakeup_bdi = true;
-		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+	while (true) {
+		spin_lock(&inode->i_lock);
+		if (!(inode->i_state & I_DIRTY)) {
+			inode->i_data.backing_dev_info = dst;
+			spin_unlock(&inode->i_lock);
+			return;
+		}
+		spin_unlock(&inode->i_lock);
+		WARN_ON_ONCE(write_inode_now(inode, true));
 	}
-	spin_unlock(&inode->i_lock);
-	spin_unlock(&old->wb.list_lock);
-	spin_unlock(&dst->wb.list_lock);
-
-	if (wakeup_bdi)
-		bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -121,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
@@ -40,7 +40,7 @@ LIST_HEAD(bdi_list);
 /* bdi_wq serves all asynchronous writeback tasks */
 struct workqueue_struct *bdi_wq;
 
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 {
 	if (wb1 < wb2) {
 		spin_lock(&wb1->list_lock);
Loading…
Add table
Reference in a new issue