#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Slab cache used to allocate struct request_queue. */
extern struct kmem_cache *blk_requestq_cachep;

/* kobj_type backing the request queue's sysfs directory. */
extern struct kobj_type blk_queue_ktype;

/* Release the queue's tag map resources. */
void __blk_queue_free_tags(struct request_queue *q);

/* Recompute the queue's congestion on/off thresholds from nr_requests. */
void blk_queue_congestion_threshold(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
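
/*
 * Minimal usage sketch, assuming a caller that tracks how many requests
 * are currently allocated.  The helper name and the "in_flight" parameter
 * are hypothetical and for illustration only; the real callers live in the
 * block core.  The gap between the two thresholds is the hysteresis:
 * congestion is flagged once usage reaches the "on" threshold and only
 * cleared again when usage drops below the lower "off" threshold, so a
 * queue hovering around a single boundary does not flip state on every
 * request.
 */
static inline void example_queue_congestion_update(struct request_queue *q,
						    int in_flight)
{
	if (in_flight >= queue_congestion_on_threshold(q)) {
		/* mark the queue congested, e.g. by setting a queue flag */
	} else if (in_flight < queue_congestion_off_threshold(q)) {
		/* clear the congested state again */
	}
}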

#endif