/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2008-2010, 2013, Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
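
/*
 * The ring is never allowed to fill completely: leaving at least
 * I915_RING_FREE_SPACE bytes (one cacheline) unused keeps the tail from
 * wrapping up against the head, which is exactly the nearly-full case
 * the erratum above forbids.  A minimal sketch of the resulting
 * free-space computation, assuming HEAD_ADDR masks the address bits out
 * of the head value (illustrative only, not the driver's exact helper):
 *
 *	static int ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = (ring->head & HEAD_ADDR) -
 *		    (ring->tail + I915_RING_FREE_SPACE);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */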

struct intel_hw_status_page {
	void *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

enum intel_ring_hangcheck_action {
	wait,		/* waiting on a semaphore or external event */
	active,		/* ACTHD is still advancing; work is progressing */
	kick,		/* stuck, but worth a kick before declaring a hang */
	hung,		/* no forward progress and nothing left to try */
};

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32 mmio_base;
	void *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.  (See the usage sketch following
	 * this struct.)
	 */
	u32 last_retired_head;

	struct {
		u32 gt;	/* protected by dev_priv->irq_lock */
		u32 pm;	/* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	u32 sync_seqno[I915_NUM_RINGS-1];
	bool (*irq_get)(struct intel_ring_buffer *ring);
	void (*irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	int (*flush)(struct intel_ring_buffer *ring,
		     u32 invalidate_domains,
		     u32 flush_domains);
	int (*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_ring_buffer *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_ring_buffer *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u32 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_ring_buffer *ring);
	int (*sync_to)(struct intel_ring_buffer *ring,
		       struct intel_ring_buffer *to,
		       u32 seqno);

	/* our mbox written by others */
	u32 semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32 signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have any not-yet-emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	void *private;
};
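
/*
 * Illustrative consumer of last_retired_head (the usage sketch referenced
 * in the struct comment above): when looking for ring space, fold in the
 * most recently retired position before resorting to reading the HEAD
 * register, then mark the value consumed.  ring_space() here is the
 * hypothetical helper sketched near the top of this file:
 *
 *	if (ring->last_retired_head != -1) {
 *		ring->head = ring->last_retired_head;
 *		ring->last_retired_head = -1;
 *		ring->space = ring_space(ring);
 *	}
 */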

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * The rings are stored contiguously in an array indexed by
	 * enum intel_ring_id, so the pointer difference below is the
	 * difference of the two ids:
	 *
	 * rcs  -> 0 = vcs,  1 = bcs,  2 = vecs;
	 * vcs  -> 0 = bcs,  1 = vecs, 2 = rcs;
	 * bcs  -> 0 = vecs, 1 = rcs,  2 = vcs;
	 * vecs -> 0 = rcs,  1 = vcs,  2 = bcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
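
/*
 * Illustrative use of the sync index, patterned on inter-ring
 * synchronisation: before ring "to" consumes results from ring "from",
 * wait only if "seqno" is newer than the last value already
 * synchronised to (a simplified sketch; error handling and object
 * tracking omitted):
 *
 *	u32 idx = intel_ring_sync_index(from, to);
 *	if (seqno > from->sync_seqno[idx]) {
 *		ret = to->sync_to(to, from, seqno);
 *		if (ret == 0)
 *			from->sync_seqno[idx] = seqno;
 *	}
 */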

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	u32 *regs = ring->status_page.page_addr;
	return regs[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	u32 *regs = ring->status_page.page_addr;
	regs[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
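
/*
 * For example, the driver keeps its breadcrumb seqno at
 * I915_GEM_HWS_INDEX, so a (lazily coherent) read of the current seqno
 * is simply:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */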

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int intel_wait_ring_idle(struct intel_ring_buffer *ring);

int intel_ring_begin(struct intel_ring_buffer *ring, int n);

/* Write one dword into the ring at the current tail and advance the
 * software tail pointer; the hardware tail register is only updated
 * later, by intel_ring_advance().
 */
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	unsigned int *virt = (unsigned int *)((intptr_t)ring->virtual_start + ring->tail);
	*virt = data;
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
int intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
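
/*
 * Typical command-emission pattern with the helpers above: reserve
 * space, write dwords, then advance the tail so the hardware sees them.
 * A minimal sketch (MI_NOOP comes from the register headers; error
 * handling abbreviated):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */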

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */