drm/i915: Mark up i915_gem_active for locking annotation
The future annotations will track the locking used for access to ensure
that it is always sufficient. We make the preparations now: thread the
lock parameter through the API ahead of the annotations themselves, and
verify that GCC can eliminate the unused parameter.
        text    data     bss      dec    hex filename
Before: 6298417 3619610  696320 10614347 a1f64b vmlinux
After:  6298417 3619610  696320 10614347 a1f64b vmlinux
(identical size(1) output with i915 builtin, i.e. the unused parameter
generates no code)
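To see why identical sizes are expected, here is a minimal standalone
sketch (illustrative names, not code from this patch): the new lock
parameter is never read, so once the helper is inlined GCC discards the
dead argument and every call site compiles to the same object code.

/* unused_param.c - standalone illustration only */
struct mutex;                           /* opaque, never dereferenced */
struct request { int seqno; };
struct active { struct request *request; };

/* Mirrors the shape of i915_gem_active_peek(): 'mutex' is unused for
 * now, existing purely so future annotations can check it. */
static inline struct request *
active_peek(const struct active *active, struct mutex *mutex)
{
	return active->request;
}

int peek_seqno(struct active *a, struct mutex *m)
{
	struct request *rq = active_peek(a, m);

	return rq ? rq->seqno : -1;
}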
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-12-git-send-email-chris@chris-wilson.co.uk
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 24ff7f4..c595cc8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -155,10 +155,13 @@
obj->base.write_domain);
for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
- i915_gem_active_get_seqno(&obj->last_read[id]));
+ i915_gem_active_get_seqno(&obj->last_read[id],
+ &obj->base.dev->struct_mutex));
seq_printf(m, "] %x %x%s%s%s",
- i915_gem_active_get_seqno(&obj->last_write),
- i915_gem_active_get_seqno(&obj->last_fence),
+ i915_gem_active_get_seqno(&obj->last_write,
+ &obj->base.dev->struct_mutex),
+ i915_gem_active_get_seqno(&obj->last_fence,
+ &obj->base.dev->struct_mutex),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -196,7 +199,8 @@
seq_printf(m, " (%s mappable)", s);
}
- engine = i915_gem_active_get_engine(&obj->last_write);
+ engine = i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (engine)
seq_printf(m, " (%s)", engine->name);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c158c7..2b6199c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1354,21 +1354,24 @@
int ret, i;
if (readonly) {
- request = i915_gem_active_peek(&obj->last_write);
+ request = i915_gem_active_peek(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (request) {
ret = i915_wait_request(request);
if (ret)
return ret;
i = request->engine->id;
- if (i915_gem_active_peek(&obj->last_read[i]) == request)
+ if (i915_gem_active_peek(&obj->last_read[i],
+ &obj->base.dev->struct_mutex) == request)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
- request = i915_gem_active_peek(&obj->last_read[i]);
+ request = i915_gem_active_peek(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (!request)
continue;
@@ -1400,9 +1403,11 @@
{
int idx = req->engine->id;
- if (i915_gem_active_peek(&obj->last_read[idx]) == req)
+ if (i915_gem_active_peek(&obj->last_read[idx],
+ &obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__read(obj, idx);
- else if (i915_gem_active_peek(&obj->last_write) == req)
+ else if (i915_gem_active_peek(&obj->last_write,
+ &obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__write(obj);
if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1431,7 +1436,8 @@
if (readonly) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_get(&obj->last_write);
+ req = i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (req == NULL)
return 0;
@@ -1440,7 +1446,8 @@
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_get(&obj->last_read[i]);
+ req = i915_gem_active_get(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req == NULL)
continue;
@@ -2387,7 +2394,9 @@
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write));
- GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));
+ GEM_BUG_ON(!(obj->active &
+ intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex))));
i915_gem_active_set(&obj->last_write, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2405,7 +2414,8 @@
list_del_init(&obj->engine_list[idx]);
i915_gem_active_set(&obj->last_read[idx], NULL);
- engine = i915_gem_active_get_engine(&obj->last_write);
+ engine = i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (engine && engine->id == idx)
i915_gem_object_retire__write(obj);
@@ -2626,7 +2636,8 @@
struct drm_i915_gem_object,
engine_list[engine->id]);
- if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
+ if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
+ &obj->base.dev->struct_mutex)->list))
break;
i915_gem_object_retire__read(obj, engine->id);
@@ -2759,7 +2770,8 @@
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_peek(&obj->last_read[i]);
+ req = i915_gem_active_peek(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req == NULL)
continue;
@@ -2837,7 +2849,8 @@
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_get(&obj->last_read[i]);
+ req = i915_gem_active_get(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
@@ -2932,14 +2945,16 @@
if (readonly) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_peek(&obj->last_write);
+ req = i915_gem_active_peek(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_peek(&obj->last_read[i]);
+ req = i915_gem_active_peek(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
@@ -4038,11 +4053,13 @@
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
- req = i915_gem_active_peek(&obj->last_read[i]);
+ req = i915_gem_active_peek(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req)
args->busy |= 1 << (16 + req->engine->exec_id);
}
- req = i915_gem_active_peek(&obj->last_write);
+ req = i915_gem_active_peek(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (req)
args->busy |= req->engine->exec_id;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 9fdbd66..a4ec4fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -263,7 +263,8 @@
{
int ret;
- ret = i915_gem_active_wait(&obj->last_fence);
+ ret = i915_gem_active_wait(&obj->last_fence,
+ &obj->base.dev->struct_mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e13834e..5f8d94c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -296,6 +296,12 @@
i915_gem_request_assign(&active->request, request);
}
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+ return active->request;
+}
+
/**
* i915_gem_active_peek - report the request being monitored
* @active - the active tracker
@@ -305,7 +311,7 @@
* caller must hold struct_mutex.
*/
static inline struct drm_i915_gem_request *
-i915_gem_active_peek(const struct i915_gem_active *active)
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
return active->request;
}
@@ -318,11 +324,11 @@
* if the active tracker is idle. The caller must hold struct_mutex.
*/
static inline struct drm_i915_gem_request *
-i915_gem_active_get(const struct i915_gem_active *active)
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- request = i915_gem_active_peek(active);
+ request = i915_gem_active_peek(active, mutex);
if (!request || i915_gem_request_completed(request))
return NULL;
@@ -352,11 +358,12 @@
* the caller to hold struct_mutex (but that can be relaxed if desired).
*/
static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active)
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+ struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- request = i915_gem_active_peek(active);
+ request = i915_gem_active_peek(active, mutex);
if (!request || i915_gem_request_completed(request))
return true;
@@ -372,11 +379,11 @@
* retired first, see i915_gem_active_retire().
*/
static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active)
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- request = i915_gem_active_peek(active);
+ request = i915_gem_active_peek(active, mutex);
if (!request)
return 0;
@@ -393,9 +400,10 @@
* tracker is idle, the function returns immediately.
*/
static inline int __must_check
-i915_gem_active_retire(const struct i915_gem_active *active)
+i915_gem_active_retire(const struct i915_gem_active *active,
+ struct mutex *mutex)
{
- return i915_gem_active_wait(active);
+ return i915_gem_active_wait(active, mutex);
}
/* Convenience functions for peeking at state inside active's request whilst
@@ -403,15 +411,17 @@
*/
static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active)
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+ struct mutex *mutex)
{
- return i915_gem_request_get_seqno(i915_gem_active_peek(active));
+ return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}
static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active)
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+ struct mutex *mutex)
{
- return i915_gem_request_get_engine(i915_gem_active_peek(active));
+ return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}
#define for_each_active(mask, idx) \
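A speculative sketch of the annotation this header change prepares for:
once wired up, the accessor can assert that the lock named by the caller
is actually held. This assumes a lockdep-based check; the exact form
used by later i915 patches may differ.

/* requires <linux/lockdep.h>; illustration only, not part of this patch */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	lockdep_assert_held(mutex);	/* compiles away without lockdep */
	return active->request;
}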
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8cef2d6..fa2eb4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -242,7 +242,8 @@
}
obj->fence_dirty =
- !i915_gem_active_is_idle(&obj->last_fence) ||
+ !i915_gem_active_is_idle(&obj->last_fence,
+ &dev->struct_mutex) ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 00ab5e9..e57521d 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -74,7 +74,8 @@
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
- req = i915_gem_active_get(&obj->last_read[i]);
+ req = i915_gem_active_get(&obj->last_read[i],
+ &obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 585fe2b..0d2882a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -742,18 +742,38 @@
#define i915_error_ggtt_object_create(dev_priv, src) \
i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
+/* The error capture is special as it tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+ return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+ struct intel_engine_cs *engine;
+
+ engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+ return engine ? engine->id : -1;
+}
+
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *engine;
int i;
err->size = obj->base.size;
err->name = obj->base.name;
+
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
- err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
+ err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+ err->wseqno = __active_get_seqno(&obj->last_write);
+ err->engine = __active_get_engine_id(&obj->last_write);
+
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@@ -766,9 +786,6 @@
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
-
- engine = i915_gem_active_get_engine(&obj->last_write);
- err->engine = engine ? engine->id : -1;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
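An illustrative contrast of the two flavours that now exist (the helper
last_write_seqno() is hypothetical, not part of the patch): locked paths
pass the guarding mutex to the checked accessor, while the error capture
deliberately uses the raw lookup because it may run with struct_mutex in
an unknown state.

/* illustration only */
static u32 last_write_seqno(struct drm_i915_gem_object *obj,
			    bool error_capture)
{
	if (error_capture)	/* no locking guarantees; raw accessor */
		return i915_gem_request_get_seqno(
			__i915_gem_active_peek(&obj->last_write));

	return i915_gem_request_get_seqno(
		i915_gem_active_peek(&obj->last_write,
				     &obj->base.dev->struct_mutex));
}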
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8c03d13d..d54a3ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11370,7 +11370,8 @@
if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true;
- return engine != i915_gem_active_get_engine(&obj->last_write);
+ return engine != i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11673,7 +11674,8 @@
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write);
+ engine = i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (engine == NULL || engine->id != RCS)
engine = &dev_priv->engine[BCS];
} else {
@@ -11694,7 +11696,8 @@
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
- work->flip_queued_req = i915_gem_active_get(&obj->last_write);
+ work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
schedule_work(&work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14038,7 +14041,8 @@
if (ret == 0) {
to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write);
+ i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
}
return ret;