blkcg: tg_stats_alloc_lock is an irq lock

tg_stats_alloc_lock nests inside the queue lock and should always be held
with irqs disabled.  throtl_pd_{init|exit}() were using non-irqsafe
spinlock ops, which triggered an inverse-lock-ordering-via-irq warning
when RCU freeing of a blkg invoked throtl_pd_exit() without IRQs
disabled.

Update both functions to use irq-safe spinlock operations.
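
For reference, below is a minimal, hypothetical sketch of the pattern (not
part of this patch; outer_lock/inner_lock and path_a()/path_b() are
placeholder names standing in for the queue lock, tg_stats_alloc_lock and
their users):

	/* Hypothetical illustration of the ordering issue; not from blk-throttle.c. */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(outer_lock);	/* e.g. the queue lock, taken from irq context */
	static DEFINE_SPINLOCK(inner_lock);	/* e.g. tg_stats_alloc_lock, nests inside */

	static void path_a(void)
	{
		/* inner_lock is acquired while irqs are already off. */
		spin_lock_irq(&outer_lock);
		spin_lock(&inner_lock);
		/* ... */
		spin_unlock(&inner_lock);
		spin_unlock_irq(&outer_lock);
	}

	static void path_b(void)
	{
		unsigned long flags;

		/*
		 * If this used plain spin_lock(), an interrupt could arrive
		 * while inner_lock is held with irqs enabled.  Should that
		 * interrupt spin on outer_lock while another CPU holds
		 * outer_lock and is spinning on inner_lock, the system
		 * deadlocks; lockdep flags exactly this as an inverse lock
		 * ordering via irq.  The irqsave variants close that window.
		 */
		spin_lock_irqsave(&inner_lock, flags);
		/* ... */
		spin_unlock_irqrestore(&inner_lock, flags);
	}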

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
LKML-Reference: <1335339396.16988.80.camel@lappy>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 14dedec..5b06595 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -219,6 +219,7 @@
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
@@ -235,19 +236,20 @@
 	 * but percpu allocator can't be called from IO path.  Queue tg on
 	 * tg_stats_alloc_list and allocate from work item.
 	 */
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_del_init(&tg->stats_alloc_node);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
 }