@@ -189,12 +189,9 @@ static ssize_t queue_discard_max_store(struct request_queue *q,
 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
-
 	if (err)
 		return err;
 	return ret;
@@ -241,11 +238,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	blk_mq_freeze_queue(q);
 	lim = queue_limits_start_update(q);
 	lim.max_user_sectors = max_sectors_kb << 1;
 	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
 	if (err)
 		return err;
 	return ret;
@@ -585,13 +580,11 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	 * ends up either enabling or disabling wbt completely. We can't
 	 * have IO inflight if that happens.
 	 */
-	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
 	wbt_set_min_lat(q, val);
 
 	blk_mq_unquiesce_queue(q);
-	blk_mq_unfreeze_queue(q);
 
 	return count;
 }
@@ -722,9 +715,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (!entry->store)
 		return -EIO;
 
+	blk_mq_freeze_queue(q);
 	mutex_lock(&q->sysfs_lock);
 	res = entry->store(q, page, length);
 	mutex_unlock(&q->sysfs_lock);
+	blk_mq_unfreeze_queue(q);
 	return res;
 }
 
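Net effect of the patch: the freeze/unfreeze pair is dropped from the individual ->store handlers (queue_discard_max_store, queue_max_sectors_store, queue_wb_lat_store) and taken once in the common queue_attr_store() wrapper, so every sysfs attribute write now runs with the queue frozen. A minimal sketch of the wrapper after this change, reconstructed only from the hunk context above; the declarations and the kobject-to-queue lookup before the NULL check are not shown in the diff and are elided here rather than guessed:

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	/* entry/q/res setup elided; only the body visible in the diff follows */

	if (!entry->store)
		return -EIO;

	/* Freeze once for the whole attribute write instead of per handler. */
	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}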