drm/i915: Replace global breadcrumbs with per-context interrupt tracking
A few years ago, see commit 688e6c7258 ("drm/i915: Slaughter the
thundering i915_wait_request herd"), the issue of handling multiple
clients waiting in parallel was brought to our attention. The
requirement was that every client should be woken immediately upon its
request being signaled, without incurring any cpu overhead. Handling
the fragility of our hw meant that we could not do a simple check
inside the irq handler (some generations required almost unbounded
delays before we could be sure of seqno coherency), and so request
completion checking required delegation.

Before commit 688e6c7258, the solution was simple. Every client waiting
on a request would be woken on every interrupt and each would do a
heavyweight check to see if their request was complete. Commit
688e6c7258 introduced an rbtree so that only the earliest waiter on the
global timeline would be woken, and it would in turn wake the next and
so on. (Along with various complications to handle requests being
reordered along the global timeline, and also a requirement for a
kthread to provide a delegate for fence signaling that had no process
context.)

The global rbtree depends on knowing the execution timeline (and global
seqno). Without knowing that order, we must instead check all contexts
queued to the HW to see which may have advanced. We trim that list by
only checking queued contexts that are being waited on, but still we
keep a list of all active contexts and their active signalers that we
inspect from inside the irq handler. By moving the waiters onto the
fence signal list, we can combine the client wakeup with the dma_fence
signaling (a dramatic reduction in complexity, but it does require the
HW being coherent: the seqno must be visible from the cpu before the
interrupt is raised - we keep a timer backup just in case).

Having previously fixed all the issues with irq-seqno serialisation (by
inserting delays onto the GPU after each request instead of random
delays on the CPU after each interrupt), we can rely on the seqno state
to perform direct wakeups from the interrupt handler. This allows us to
preserve the single context switch behaviour of the current routine,
with the only downside being that we lose the RT priority sorting of
wakeups. In general, direct wakeup latency of multiple clients is about
the same (about 10% better in most cases) with a reduction in total CPU
time spent in the waiter (about 20-50% depending on gen). Average herd
behaviour is improved, but at the cost of not delegating wakeups on
task_prio.

v2: Capture fence signaling state for error state and add comments to
warm even the most cold of hearts.
v3: Check if the request is still active before busywaiting.
v4: Reduce the amount of pointer misdirection with list_for_each_safe
and by using a local i915_request variable inside the loops.
v5: Add a missing pluralisation to a purely informative selftest
message.

References: 688e6c7258 ("drm/i915: Slaughter the thundering
i915_wait_request herd")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129205230.19056-2-chris@chris-wilson.co.uk
@@ -5,6 +5,7 @@
 #include <drm/drm_util.h>
 
 #include <linux/hashtable.h>
+#include <linux/irq_work.h>
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
@@ -381,22 +382,19 @@ struct intel_engine_cs {
 	 * the overhead of waking that client is much preferred.
 	 */
 	struct intel_breadcrumbs {
-		spinlock_t irq_lock; /* protects irq_*; irqsafe */
-		struct intel_wait *irq_wait; /* oldest waiter by retirement */
-
-		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
-		struct rb_root waiters; /* sorted by retirement, priority */
-		struct list_head signals; /* sorted by retirement */
-		struct task_struct *signaler; /* used for fence signalling */
+		spinlock_t irq_lock;
+		struct list_head signalers;
+
+		struct irq_work irq_work; /* for use from inside irq_lock */
 
 		struct timer_list fake_irq; /* used after a missed interrupt */
 		struct timer_list hangcheck; /* detect missed interrupts */
 
 		unsigned int hangcheck_interrupts;
 		unsigned int irq_enabled;
-		unsigned int irq_count;
 
-		bool irq_armed : 1;
+		bool irq_armed;
+		bool irq_fired;
 	} breadcrumbs;
 
 	struct {
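The new irq_work member above is what keeps the hard irq handler cheap:
on a user interrupt the handler merely queues the signaling walk, and
the walk runs moments later as irq_work (still in interrupt context,
but outside the hard irq handler itself). A toy module showing that
kernel pattern under invented names; the real i915 locking and fence
handling are elided.

/*
 * Sketch of the irq_work pattern; a toy module, not the i915 code.
 * The callback runs in interrupt context shortly after being queued,
 * so the hard irq handler only has to queue it.
 */
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/printk.h>

static struct irq_work breadcrumb_work;

/* runs soon after irq_work_queue(); do the fence-signaling walk here */
static void breadcrumb_irq_work(struct irq_work *work)
{
	pr_info("signal completed breadcrumbs here\n");
}

static int __init example_init(void)
{
	init_irq_work(&breadcrumb_work, breadcrumb_irq_work);

	/* what the user-interrupt handler would do: cheap, never blocks */
	irq_work_queue(&breadcrumb_work);
	return 0;
}

static void __exit example_exit(void)
{
	irq_work_sync(&breadcrumb_work);	/* wait out any pending callback */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");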
@@ -885,83 +883,29 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
-/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-
-static inline void intel_wait_init(struct intel_wait *wait)
-{
-	wait->tsk = current;
-	wait->request = NULL;
-}
-
-static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
-{
-	wait->tsk = current;
-	wait->seqno = seqno;
-}
-
-static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
-{
-	return wait->seqno;
-}
-
-static inline bool
-intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
-{
-	wait->seqno = seqno;
-	return intel_wait_has_seqno(wait);
-}
-
-static inline bool
-intel_wait_update_request(struct intel_wait *wait,
-			  const struct i915_request *rq)
-{
-	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool
-intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
-{
-	return wait->seqno == seqno;
-}
-
-static inline bool
-intel_wait_check_request(const struct intel_wait *wait,
-			 const struct i915_request *rq)
-{
-	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool intel_wait_complete(const struct intel_wait *wait)
-{
-	return RB_EMPTY_NODE(&wait->node);
-}
-
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
-			   struct intel_wait *wait);
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
-			      struct intel_wait *wait);
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
-void intel_engine_cancel_signaling(struct i915_request *request);
-
-static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
-{
-	return READ_ONCE(engine->breadcrumbs.irq_wait);
-}
-
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
-#define ENGINE_WAKEUP_WAITER BIT(0)
-#define ENGINE_WAKEUP_ASLEEP BIT(1)
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
 void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
 void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
 
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 
+static inline void
+intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+{
+	irq_work_queue(&engine->breadcrumbs.irq_work);
+}
+
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
+
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 				    struct drm_printer *p);
 
 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 {
 	memset(batch, 0, 6 * sizeof(u32));
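With the intel_wait_*() helpers gone, a client wait is, in essence,
just a dma_fence callback that wakes the sleeping task once the
interrupt-driven walk calls dma_fence_signal(). Below is a hypothetical
wait_for_fence() helper sketching that flow with the stock dma-fence
API; it is not the i915 implementation, which additionally busywaits
briefly and handles signals and timeouts.

/* minimal sketch of waiting via dma_fence signaling; names invented */
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>

struct fence_waiter {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

/* runs from dma_fence_signal(), i.e. from the breadcrumb irq work */
static void wake_waiter(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct fence_waiter *w = container_of(cb, struct fence_waiter, cb);

	wake_up_process(w->tsk);
}

static int wait_for_fence(struct dma_fence *fence)
{
	struct fence_waiter w = { .tsk = current };
	int err;

	/* -ENOENT means the fence already signaled: nothing to wait for */
	err = dma_fence_add_callback(fence, &w.cb, wake_waiter);
	if (err)
		return err == -ENOENT ? 0 : err;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (dma_fence_is_signaled(fence))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(fence, &w.cb);
	return 0;
}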