drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
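/*
 * Bytes the CPU may still write into the ring: the gap from the software
 * tail (plus the I915_RING_FREE_SPACE pad) forward to the hardware head,
 * wrapping around the ring size when the result goes negative.
 */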
36 static inline int ring_space(struct intel_ring_buffer *ring)
37 {
38         int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
39         if (space < 0)
40                 space += ring->size;
41         return space;
42 }
43
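/*
 * Commit the software tail to hardware: wrap it to the ring size and write
 * the TAIL register, unless this ring has been stopped via
 * gpu_error.stop_rings (used by test code to simulate hangs), in which case
 * the MMIO write is skipped and the queued commands are never executed.
 */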
44 void __intel_ring_advance(struct intel_ring_buffer *ring)
45 {
46         struct drm_i915_private *dev_priv = ring->dev->dev_private;
47
48         ring->tail &= ring->size - 1;
49         if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
50                 return;
51         ring->write_tail(ring, ring->tail);
52 }
53
54 static int
55 gen2_render_ring_flush(struct intel_ring_buffer *ring,
56                        u32      invalidate_domains,
57                        u32      flush_domains)
58 {
59         u32 cmd;
60         int ret;
61
62         cmd = MI_FLUSH;
63         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
64                 cmd |= MI_NO_WRITE_FLUSH;
65
66         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
67                 cmd |= MI_READ_FLUSH;
68
69         ret = intel_ring_begin(ring, 2);
70         if (ret)
71                 return ret;
72
73         intel_ring_emit(ring, cmd);
74         intel_ring_emit(ring, MI_NOOP);
75         intel_ring_advance(ring);
76
77         return 0;
78 }
79
80 static int
81 gen4_render_ring_flush(struct intel_ring_buffer *ring,
82                        u32      invalidate_domains,
83                        u32      flush_domains)
84 {
85         struct drm_device *dev = ring->dev;
86         u32 cmd;
87         int ret;
88
89         /*
90          * read/write caches:
91          *
92          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
93          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
94          * also flushed at 2d versus 3d pipeline switches.
95          *
96          * read-only caches:
97          *
98          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
99          * MI_READ_FLUSH is set, and is always flushed on 965.
100          *
101          * I915_GEM_DOMAIN_COMMAND may not exist?
102          *
103          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
104          * invalidated when MI_EXE_FLUSH is set.
105          *
106          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
107          * invalidated with every MI_FLUSH.
108          *
109          * TLBs:
110          *
111          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
112          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
113          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
114          * are flushed at any MI_FLUSH.
115          */
116
117         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
118         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
119                 cmd &= ~MI_NO_WRITE_FLUSH;
120         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121                 cmd |= MI_EXE_FLUSH;
122
123         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
124             (IS_G4X(dev) || IS_GEN5(dev)))
125                 cmd |= MI_INVALIDATE_ISP;
126
127         ret = intel_ring_begin(ring, 2);
128         if (ret)
129                 return ret;
130
131         intel_ring_emit(ring, cmd);
132         intel_ring_emit(ring, MI_NOOP);
133         intel_ring_advance(ring);
134
135         return 0;
136 }
137
138 /**
139  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
140  * implementing two workarounds on gen6.  From section 1.4.7.1
141  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142  *
143  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
144  * produced by non-pipelined state commands), software needs to first
145  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
146  * 0.
147  *
148  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
149  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150  *
151  * And the workaround for these two requires this workaround first:
152  *
153  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
154  * BEFORE the pipe-control with a post-sync op and no write-cache
155  * flushes.
156  *
157  * And this last workaround is tricky because of the requirements on
158  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
159  * volume 2 part 1:
160  *
161  *     "1 of the following must also be set:
162  *      - Render Target Cache Flush Enable ([12] of DW1)
163  *      - Depth Cache Flush Enable ([0] of DW1)
164  *      - Stall at Pixel Scoreboard ([1] of DW1)
165  *      - Depth Stall ([13] of DW1)
166  *      - Post-Sync Operation ([13] of DW1)
167  *      - Notify Enable ([8] of DW1)"
168  *
169  * The cache flushes require the workaround flush that triggered this
170  * one, so we can't use it.  Depth stall would trigger the same.
171  * Post-sync nonzero is what triggered this second workaround, so we
172  * can't use that one either.  Notify enable is IRQs, which aren't
173  * really our business.  That leaves only stall at scoreboard.
174  */
175 static int
176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177 {
178         u32 scratch_addr = ring->scratch.gtt_offset + 128;
179         int ret;
180
181
182         ret = intel_ring_begin(ring, 6);
183         if (ret)
184                 return ret;
185
186         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
188                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
189         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
190         intel_ring_emit(ring, 0); /* low dword */
191         intel_ring_emit(ring, 0); /* high dword */
192         intel_ring_emit(ring, MI_NOOP);
193         intel_ring_advance(ring);
194
195         ret = intel_ring_begin(ring, 6);
196         if (ret)
197                 return ret;
198
199         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
202         intel_ring_emit(ring, 0);
203         intel_ring_emit(ring, 0);
204         intel_ring_emit(ring, MI_NOOP);
205         intel_ring_advance(ring);
206
207         return 0;
208 }
209
210 static int
211 gen6_render_ring_flush(struct intel_ring_buffer *ring,
212                          u32 invalidate_domains, u32 flush_domains)
213 {
214         u32 flags = 0;
215         u32 scratch_addr = ring->scratch.gtt_offset + 128;
216         int ret;
217
218         /* Force SNB workarounds for PIPE_CONTROL flushes */
219         ret = intel_emit_post_sync_nonzero_flush(ring);
220         if (ret)
221                 return ret;
222
223         /* Just flush everything.  Experiments have shown that reducing the
224          * number of bits based on the write domains has little performance
225          * impact.
226          */
227         if (flush_domains) {
228                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
229                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
230                 /*
231                  * Ensure that any following seqno writes only happen
232                  * when the render cache is indeed flushed.
233                  */
234                 flags |= PIPE_CONTROL_CS_STALL;
235         }
236         if (invalidate_domains) {
237                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
238                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
239                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
240                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
241                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
242                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
243                 /*
244                  * TLB invalidate requires a post-sync write.
245                  */
246                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
247         }
248
249         ret = intel_ring_begin(ring, 4);
250         if (ret)
251                 return ret;
252
253         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
254         intel_ring_emit(ring, flags);
255         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
256         intel_ring_emit(ring, 0);
257         intel_ring_advance(ring);
258
259         return 0;
260 }
261
262 static int
263 gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
264 {
265         int ret;
266
267         ret = intel_ring_begin(ring, 4);
268         if (ret)
269                 return ret;
270
271         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
272         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
273                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
274         intel_ring_emit(ring, 0);
275         intel_ring_emit(ring, 0);
276         intel_ring_advance(ring);
277
278         return 0;
279 }
280
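/*
 * Emit the LRI write of @value to MSG_FBC_REND_STATE that tells the FBC
 * hardware to invalidate ("nuke") its compressed buffer after a render
 * flush, but only when FBC state has been marked dirty for this ring.
 */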
281 static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
282 {
283         int ret;
284
285         if (!ring->fbc_dirty)
286                 return 0;
287
288         ret = intel_ring_begin(ring, 4);
289         if (ret)
290                 return ret;
291         intel_ring_emit(ring, MI_NOOP);
292         /* WaFbcNukeOn3DBlt:ivb/hsw */
293         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
294         intel_ring_emit(ring, MSG_FBC_REND_STATE);
295         intel_ring_emit(ring, value);
296         intel_ring_advance(ring);
297
298         ring->fbc_dirty = false;
299         return 0;
300 }
301
302 static int
303 gen7_render_ring_flush(struct intel_ring_buffer *ring,
304                        u32 invalidate_domains, u32 flush_domains)
305 {
306         u32 flags = 0;
307         u32 scratch_addr = ring->scratch.gtt_offset + 128;
308         int ret;
309
310         /*
311          * Ensure that any following seqno writes only happen when the render
312          * cache is indeed flushed.
313          *
314          * Workaround: 4th PIPE_CONTROL command (except the ones with only
315          * read-cache invalidate bits set) must have the CS_STALL bit set. We
316          * don't try to be clever and just set it unconditionally.
317          */
318         flags |= PIPE_CONTROL_CS_STALL;
319
320         /* Just flush everything.  Experiments have shown that reducing the
321          * number of bits based on the write domains has little performance
322          * impact.
323          */
324         if (flush_domains) {
325                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
326                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
327         }
328         if (invalidate_domains) {
329                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
330                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
331                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
332                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
333                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
334                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
335                 /*
336                  * TLB invalidate requires a post-sync write.
337                  */
338                 flags |= PIPE_CONTROL_QW_WRITE;
339                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
340
341                 /* Workaround: we must issue a pipe_control with CS-stall bit
342                  * set before a pipe_control command that has the state cache
343                  * invalidate bit set. */
344                 gen7_render_ring_cs_stall_wa(ring);
345         }
346
347         ret = intel_ring_begin(ring, 4);
348         if (ret)
349                 return ret;
350
351         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
352         intel_ring_emit(ring, flags);
353         intel_ring_emit(ring, scratch_addr);
354         intel_ring_emit(ring, 0);
355         intel_ring_advance(ring);
356
357         if (flush_domains)
358                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
359
360         return 0;
361 }
362
363 static int
364 gen8_render_ring_flush(struct intel_ring_buffer *ring,
365                        u32 invalidate_domains, u32 flush_domains)
366 {
367         u32 flags = 0;
368         u32 scratch_addr = ring->scratch.gtt_offset + 128;
369         int ret;
370
371         flags |= PIPE_CONTROL_CS_STALL;
372
373         if (flush_domains) {
374                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
375                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
376         }
377         if (invalidate_domains) {
378                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
379                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
380                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
381                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
382                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
383                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
384                 flags |= PIPE_CONTROL_QW_WRITE;
385                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
386         }
387
388         ret = intel_ring_begin(ring, 6);
389         if (ret)
390                 return ret;
391
392         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
393         intel_ring_emit(ring, flags);
394         intel_ring_emit(ring, scratch_addr);
395         intel_ring_emit(ring, 0);
396         intel_ring_emit(ring, 0);
397         intel_ring_emit(ring, 0);
398         intel_ring_advance(ring);
399
400         return 0;
401
402 }
403
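/* Default tail update: a single MMIO write to the ring's TAIL register. */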
404 static void ring_write_tail(struct intel_ring_buffer *ring,
405                             u32 value)
406 {
407         drm_i915_private_t *dev_priv = ring->dev->dev_private;
408         I915_WRITE_TAIL(ring, value);
409 }
410
411 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
412 {
413         drm_i915_private_t *dev_priv = ring->dev->dev_private;
414         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
415                         RING_ACTHD(ring->mmio_base) : ACTHD;
416
417         return I915_READ(acthd_reg);
418 }
419
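/*
 * Program HWS_PGA with the bus address of the DMA-allocated status page
 * (the non-GFX_HWS path); on gen4+ the address bits above bit 31 are
 * packed into bits 7:4 of the register.
 */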
420 static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
421 {
422         struct drm_i915_private *dev_priv = ring->dev->dev_private;
423         u32 addr;
424
425         addr = dev_priv->status_page_dmah->busaddr;
426         if (INTEL_INFO(ring->dev)->gen >= 4)
427                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
428         I915_WRITE(HWS_PGA, addr);
429 }
430
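/*
 * Common ring bring-up: set up the status page, stop the ring, work around
 * G45's failure to reset HEAD to zero, then program START and CTL and wait
 * for the ring to report RING_VALID with HEAD back at zero.
 */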
431 static int init_ring_common(struct intel_ring_buffer *ring)
432 {
433         struct drm_device *dev = ring->dev;
434         drm_i915_private_t *dev_priv = dev->dev_private;
435         struct drm_i915_gem_object *obj = ring->obj;
436         int ret = 0;
437         u32 head;
438
439         gen6_gt_force_wake_get(dev_priv);
440
441         if (I915_NEED_GFX_HWS(dev))
442                 intel_ring_setup_status_page(ring);
443         else
444                 ring_setup_phys_status_page(ring);
445
446         /* Stop the ring if it's running. */
447         I915_WRITE_CTL(ring, 0);
448         I915_WRITE_HEAD(ring, 0);
449         ring->write_tail(ring, 0);
450
451         head = I915_READ_HEAD(ring) & HEAD_ADDR;
452
453         /* G45 ring initialization fails to reset head to zero */
454         if (head != 0) {
455                 DRM_DEBUG_KMS("%s head not reset to zero "
456                               "ctl %08x head %08x tail %08x start %08x\n",
457                               ring->name,
458                               I915_READ_CTL(ring),
459                               I915_READ_HEAD(ring),
460                               I915_READ_TAIL(ring),
461                               I915_READ_START(ring));
462
463                 I915_WRITE_HEAD(ring, 0);
464
465                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
466                         DRM_ERROR("failed to set %s head to zero "
467                                   "ctl %08x head %08x tail %08x start %08x\n",
468                                   ring->name,
469                                   I915_READ_CTL(ring),
470                                   I915_READ_HEAD(ring),
471                                   I915_READ_TAIL(ring),
472                                   I915_READ_START(ring));
473                 }
474         }
475
476         /* Initialize the ring. This must happen _after_ we've cleared the ring
477          * registers with the above sequence (the readback of the HEAD registers
478          * also enforces ordering), otherwise the hw might lose the new ring
479          * register values. */
480         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
481         I915_WRITE_CTL(ring,
482                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
483                         | RING_VALID);
484
485         /* If the head is still not zero, the ring is dead */
486         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
487                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
488                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
489                 DRM_ERROR("%s initialization failed "
490                                 "ctl %08x head %08x tail %08x start %08x\n",
491                                 ring->name,
492                                 I915_READ_CTL(ring),
493                                 I915_READ_HEAD(ring),
494                                 I915_READ_TAIL(ring),
495                                 I915_READ_START(ring));
496                 ret = -EIO;
497                 goto out;
498         }
499
500         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
501                 i915_kernel_lost_context(ring->dev);
502         else {
503                 ring->head = I915_READ_HEAD(ring);
504                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
505                 ring->space = ring_space(ring);
506                 ring->last_retired_head = -1;
507         }
508
509         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
510
511 out:
512         gen6_gt_force_wake_put(dev_priv);
513
514         return ret;
515 }
516
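/*
 * Allocate, pin and map the 4KiB scratch page used as the PIPE_CONTROL
 * write target; pc_render_get_seqno() also reads the seqno back from it.
 */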
517 static int
518 init_pipe_control(struct intel_ring_buffer *ring)
519 {
520         int ret;
521
522         if (ring->scratch.obj)
523                 return 0;
524
525         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
526         if (ring->scratch.obj == NULL) {
527                 DRM_ERROR("Failed to allocate seqno page\n");
528                 ret = -ENOMEM;
529                 goto err;
530         }
531
532         i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
533
534         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
535         if (ret)
536                 goto err_unref;
537
538         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
539         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
540         if (ring->scratch.cpu_page == NULL) {
541                 ret = -ENOMEM;
542                 goto err_unpin;
543         }
544
545         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
546                          ring->name, ring->scratch.gtt_offset);
547         return 0;
548
549 err_unpin:
550         i915_gem_object_unpin(ring->scratch.obj);
551 err_unref:
552         drm_gem_object_unreference(&ring->scratch.obj->base);
553 err:
554         return ret;
555 }
556
557 static int init_render_ring(struct intel_ring_buffer *ring)
558 {
559         struct drm_device *dev = ring->dev;
560         struct drm_i915_private *dev_priv = dev->dev_private;
561         int ret = init_ring_common(ring);
562
563         if (INTEL_INFO(dev)->gen > 3)
564                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
565
566         /* We need to disable the AsyncFlip performance optimisations in order
567          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
568          * programmed to '1' on all products.
569          *
570          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
571          */
572         if (INTEL_INFO(dev)->gen >= 6)
573                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
574
575         /* Required for the hardware to program scanline values for waiting */
576         if (INTEL_INFO(dev)->gen == 6)
577                 I915_WRITE(GFX_MODE,
578                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
579
580         if (IS_GEN7(dev))
581                 I915_WRITE(GFX_MODE_GEN7,
582                            _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
583                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
584
585         if (INTEL_INFO(dev)->gen >= 5) {
586                 ret = init_pipe_control(ring);
587                 if (ret)
588                         return ret;
589         }
590
591         if (IS_GEN6(dev)) {
592                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
593                  * "If this bit is set, STCunit will have LRA as replacement
594                  *  policy. [...] This bit must be reset.  LRA replacement
595                  *  policy is not supported."
596                  */
597                 I915_WRITE(CACHE_MODE_0,
598                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
599
600                 /* This is not explicitly set for GEN6, so read the register.
601                  * See intel_ring_mi_set_context() for why we care.
602                  * TODO: consider explicitly setting the bit for GEN5
603                  */
604                 ring->itlb_before_ctx_switch =
605                         !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
606         }
607
608         if (INTEL_INFO(dev)->gen >= 6)
609                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
610
611         if (HAS_L3_DPF(dev))
612                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
613
614         return ret;
615 }
616
617 static void render_ring_cleanup(struct intel_ring_buffer *ring)
618 {
619         struct drm_device *dev = ring->dev;
620
621         if (ring->scratch.obj == NULL)
622                 return;
623
624         if (INTEL_INFO(dev)->gen >= 5) {
625                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
626                 i915_gem_object_unpin(ring->scratch.obj);
627         }
628
629         drm_gem_object_unreference(&ring->scratch.obj->base);
630         ring->scratch.obj = NULL;
631 }
632
633 static void
634 update_mboxes(struct intel_ring_buffer *ring,
635               u32 mmio_offset)
636 {
637 /* NB: In order to be able to do semaphore MBOX updates for a varying number
638  * of rings, it's easiest if we round up each individual update to a
639  * multiple of 2 (since ring updates must always be a multiple of 2)
640  * even though the actual update only requires 3 dwords.
641  */
642 #define MBOX_UPDATE_DWORDS 4
643         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
644         intel_ring_emit(ring, mmio_offset);
645         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
646         intel_ring_emit(ring, MI_NOOP);
647 }
648
649 /**
650  * gen6_add_request - Update the semaphore mailbox registers
651  *
652  * @ring: ring that is adding a request
653  * @seqno: return seqno stuck into the ring
654  *
655  * Update the mailbox registers in the *other* rings with the current seqno.
656  * This acts like a signal in the canonical semaphore.
657  */
658 static int
659 gen6_add_request(struct intel_ring_buffer *ring)
660 {
661         struct drm_device *dev = ring->dev;
662         struct drm_i915_private *dev_priv = dev->dev_private;
663         struct intel_ring_buffer *useless;
664         int i, ret;
665
666         ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
667                                       MBOX_UPDATE_DWORDS) +
668                                       4);
669         if (ret)
670                 return ret;
671 #undef MBOX_UPDATE_DWORDS
672
673         for_each_ring(useless, dev_priv, i) {
674                 u32 mbox_reg = ring->signal_mbox[i];
675                 if (mbox_reg != GEN6_NOSYNC)
676                         update_mboxes(ring, mbox_reg);
677         }
678
679         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
680         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
681         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
682         intel_ring_emit(ring, MI_USER_INTERRUPT);
683         __intel_ring_advance(ring);
684
685         return 0;
686 }
687
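/*
 * After a seqno wrap the driver-assigned seqnos restart from a small value,
 * so a wait target numerically above dev_priv->last_seqno is stale and the
 * semaphore wait for it should be skipped.
 */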
688 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
689                                               u32 seqno)
690 {
691         struct drm_i915_private *dev_priv = dev->dev_private;
692         return dev_priv->last_seqno < seqno;
693 }
694
695 /**
696  * gen6_ring_sync - sync the waiter to the signaller on seqno
697  *
698  * @waiter: ring that is waiting
699  * @signaller: ring which has, or will signal
700  * @seqno: seqno which the waiter will block on
701  */
702 static int
703 gen6_ring_sync(struct intel_ring_buffer *waiter,
704                struct intel_ring_buffer *signaller,
705                u32 seqno)
706 {
707         int ret;
708         u32 dw1 = MI_SEMAPHORE_MBOX |
709                   MI_SEMAPHORE_COMPARE |
710                   MI_SEMAPHORE_REGISTER;
711
712         /* Throughout all of the GEM code, seqno passed implies our current
713          * seqno is >= the last seqno executed. However for hardware the
714          * comparison is strictly greater than.
715          */
716         seqno -= 1;
717
718         WARN_ON(signaller->semaphore_register[waiter->id] ==
719                 MI_SEMAPHORE_SYNC_INVALID);
720
721         ret = intel_ring_begin(waiter, 4);
722         if (ret)
723                 return ret;
724
725         /* If seqno wrap happened, omit the wait with no-ops */
726         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
727                 intel_ring_emit(waiter,
728                                 dw1 |
729                                 signaller->semaphore_register[waiter->id]);
730                 intel_ring_emit(waiter, seqno);
731                 intel_ring_emit(waiter, 0);
732                 intel_ring_emit(waiter, MI_NOOP);
733         } else {
734                 intel_ring_emit(waiter, MI_NOOP);
735                 intel_ring_emit(waiter, MI_NOOP);
736                 intel_ring_emit(waiter, MI_NOOP);
737                 intel_ring_emit(waiter, MI_NOOP);
738         }
739         intel_ring_advance(waiter);
740
741         return 0;
742 }
743
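/*
 * Emit one depth-stalling PIPE_CONTROL qword write to addr__; used below to
 * scrub six separate scratch cachelines before the final PIPE_NOTIFY.
 */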
744 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
745 do {                                                                    \
746         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
747                  PIPE_CONTROL_DEPTH_STALL);                             \
748         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
749         intel_ring_emit(ring__, 0);                                                     \
750         intel_ring_emit(ring__, 0);                                                     \
751 } while (0)
752
753 static int
754 pc_render_add_request(struct intel_ring_buffer *ring)
755 {
756         u32 scratch_addr = ring->scratch.gtt_offset + 128;
757         int ret;
758
759         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
760          * incoherent with writes to memory, i.e. completely fubar,
761          * so we need to use PIPE_NOTIFY instead.
762          *
763          * However, we also need to workaround the qword write
764          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
765          * memory before requesting an interrupt.
766          */
767         ret = intel_ring_begin(ring, 32);
768         if (ret)
769                 return ret;
770
771         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
772                         PIPE_CONTROL_WRITE_FLUSH |
773                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
774         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
775         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
776         intel_ring_emit(ring, 0);
777         PIPE_CONTROL_FLUSH(ring, scratch_addr);
778         scratch_addr += 128; /* write to separate cachelines */
779         PIPE_CONTROL_FLUSH(ring, scratch_addr);
780         scratch_addr += 128;
781         PIPE_CONTROL_FLUSH(ring, scratch_addr);
782         scratch_addr += 128;
783         PIPE_CONTROL_FLUSH(ring, scratch_addr);
784         scratch_addr += 128;
785         PIPE_CONTROL_FLUSH(ring, scratch_addr);
786         scratch_addr += 128;
787         PIPE_CONTROL_FLUSH(ring, scratch_addr);
788
789         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
790                         PIPE_CONTROL_WRITE_FLUSH |
791                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
792                         PIPE_CONTROL_NOTIFY);
793         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
794         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
795         intel_ring_emit(ring, 0);
796         __intel_ring_advance(ring);
797
798         return 0;
799 }
800
801 static u32
802 gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
803 {
804         /* Workaround to force correct ordering between irq and seqno writes on
805          * ivb (and maybe also on snb) by reading from a CS register (like
806          * ACTHD) before reading the status page. */
807         if (!lazy_coherency)
808                 intel_ring_get_active_head(ring);
809         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
810 }
811
812 static u32
813 ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
814 {
815         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
816 }
817
818 static void
819 ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
820 {
821         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
822 }
823
824 static u32
825 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
826 {
827         return ring->scratch.cpu_page[0];
828 }
829
830 static void
831 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
832 {
833         ring->scratch.cpu_page[0] = seqno;
834 }
835
836 static bool
837 gen5_ring_get_irq(struct intel_ring_buffer *ring)
838 {
839         struct drm_device *dev = ring->dev;
840         drm_i915_private_t *dev_priv = dev->dev_private;
841         unsigned long flags;
842
843         if (!dev->irq_enabled)
844                 return false;
845
846         spin_lock_irqsave(&dev_priv->irq_lock, flags);
847         if (ring->irq_refcount++ == 0)
848                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
849         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
850
851         return true;
852 }
853
854 static void
855 gen5_ring_put_irq(struct intel_ring_buffer *ring)
856 {
857         struct drm_device *dev = ring->dev;
858         drm_i915_private_t *dev_priv = dev->dev_private;
859         unsigned long flags;
860
861         spin_lock_irqsave(&dev_priv->irq_lock, flags);
862         if (--ring->irq_refcount == 0)
863                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
864         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
865 }
866
867 static bool
868 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
869 {
870         struct drm_device *dev = ring->dev;
871         drm_i915_private_t *dev_priv = dev->dev_private;
872         unsigned long flags;
873
874         if (!dev->irq_enabled)
875                 return false;
876
877         spin_lock_irqsave(&dev_priv->irq_lock, flags);
878         if (ring->irq_refcount++ == 0) {
879                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
880                 I915_WRITE(IMR, dev_priv->irq_mask);
881                 POSTING_READ(IMR);
882         }
883         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
884
885         return true;
886 }
887
888 static void
889 i9xx_ring_put_irq(struct intel_ring_buffer *ring)
890 {
891         struct drm_device *dev = ring->dev;
892         drm_i915_private_t *dev_priv = dev->dev_private;
893         unsigned long flags;
894
895         spin_lock_irqsave(&dev_priv->irq_lock, flags);
896         if (--ring->irq_refcount == 0) {
897                 dev_priv->irq_mask |= ring->irq_enable_mask;
898                 I915_WRITE(IMR, dev_priv->irq_mask);
899                 POSTING_READ(IMR);
900         }
901         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
902 }
903
904 static bool
905 i8xx_ring_get_irq(struct intel_ring_buffer *ring)
906 {
907         struct drm_device *dev = ring->dev;
908         drm_i915_private_t *dev_priv = dev->dev_private;
909         unsigned long flags;
910
911         if (!dev->irq_enabled)
912                 return false;
913
914         spin_lock_irqsave(&dev_priv->irq_lock, flags);
915         if (ring->irq_refcount++ == 0) {
916                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
917                 I915_WRITE16(IMR, dev_priv->irq_mask);
918                 POSTING_READ16(IMR);
919         }
920         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
921
922         return true;
923 }
924
925 static void
926 i8xx_ring_put_irq(struct intel_ring_buffer *ring)
927 {
928         struct drm_device *dev = ring->dev;
929         drm_i915_private_t *dev_priv = dev->dev_private;
930         unsigned long flags;
931
932         spin_lock_irqsave(&dev_priv->irq_lock, flags);
933         if (--ring->irq_refcount == 0) {
934                 dev_priv->irq_mask |= ring->irq_enable_mask;
935                 I915_WRITE16(IMR, dev_priv->irq_mask);
936                 POSTING_READ16(IMR);
937         }
938         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
939 }
940
941 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
942 {
943         struct drm_device *dev = ring->dev;
944         drm_i915_private_t *dev_priv = ring->dev->dev_private;
945         u32 mmio = 0;
946
947         /* The ring status page addresses are no longer next to the rest of
948          * the ring registers as of gen7.
949          */
950         if (IS_GEN7(dev)) {
951                 switch (ring->id) {
952                 case RCS:
953                         mmio = RENDER_HWS_PGA_GEN7;
954                         break;
955                 case BCS:
956                         mmio = BLT_HWS_PGA_GEN7;
957                         break;
958                 case VCS:
959                         mmio = BSD_HWS_PGA_GEN7;
960                         break;
961                 case VECS:
962                         mmio = VEBOX_HWS_PGA_GEN7;
963                         break;
964                 }
965         } else if (IS_GEN6(ring->dev)) {
966                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
967         } else {
968                 /* XXX: gen8 returns to sanity */
969                 mmio = RING_HWS_PGA(ring->mmio_base);
970         }
971
972         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
973         POSTING_READ(mmio);
974
975         /* Flush the TLB for this page */
976         if (INTEL_INFO(dev)->gen >= 6) {
977                 u32 reg = RING_INSTPM(ring->mmio_base);
978                 I915_WRITE(reg,
979                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
980                                               INSTPM_SYNC_FLUSH));
981                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
982                              1000))
983                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
984                                   ring->name);
985         }
986 }
987
988 static int
989 bsd_ring_flush(struct intel_ring_buffer *ring,
990                u32     invalidate_domains,
991                u32     flush_domains)
992 {
993         int ret;
994
995         ret = intel_ring_begin(ring, 2);
996         if (ret)
997                 return ret;
998
999         intel_ring_emit(ring, MI_FLUSH);
1000         intel_ring_emit(ring, MI_NOOP);
1001         intel_ring_advance(ring);
1002         return 0;
1003 }
1004
1005 static int
1006 i9xx_add_request(struct intel_ring_buffer *ring)
1007 {
1008         int ret;
1009
1010         ret = intel_ring_begin(ring, 4);
1011         if (ret)
1012                 return ret;
1013
1014         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1015         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1016         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1017         intel_ring_emit(ring, MI_USER_INTERRUPT);
1018         __intel_ring_advance(ring);
1019
1020         return 0;
1021 }
1022
1023 static bool
1024 gen6_ring_get_irq(struct intel_ring_buffer *ring)
1025 {
1026         struct drm_device *dev = ring->dev;
1027         drm_i915_private_t *dev_priv = dev->dev_private;
1028         unsigned long flags;
1029
1030         if (!dev->irq_enabled)
1031                 return false;
1032
1033         /* It looks like we need to prevent the gt from suspending while waiting
1034          * for a notify irq, otherwise irqs seem to get lost on at least the
1035          * blt/bsd rings on ivb. */
1036         gen6_gt_force_wake_get(dev_priv);
1037
1038         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1039         if (ring->irq_refcount++ == 0) {
1040                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1041                         I915_WRITE_IMR(ring,
1042                                        ~(ring->irq_enable_mask |
1043                                          GT_PARITY_ERROR(dev)));
1044                 else
1045                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1046                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1047         }
1048         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1049
1050         return true;
1051 }
1052
1053 static void
1054 gen6_ring_put_irq(struct intel_ring_buffer *ring)
1055 {
1056         struct drm_device *dev = ring->dev;
1057         drm_i915_private_t *dev_priv = dev->dev_private;
1058         unsigned long flags;
1059
1060         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1061         if (--ring->irq_refcount == 0) {
1062                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1063                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1064                 else
1065                         I915_WRITE_IMR(ring, ~0);
1066                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1067         }
1068         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1069
1070         gen6_gt_force_wake_put(dev_priv);
1071 }
1072
1073 static bool
1074 hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1075 {
1076         struct drm_device *dev = ring->dev;
1077         struct drm_i915_private *dev_priv = dev->dev_private;
1078         unsigned long flags;
1079
1080         if (!dev->irq_enabled)
1081                 return false;
1082
1083         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1084         if (ring->irq_refcount++ == 0) {
1085                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1086                 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1087         }
1088         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1089
1090         return true;
1091 }
1092
1093 static void
1094 hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1095 {
1096         struct drm_device *dev = ring->dev;
1097         struct drm_i915_private *dev_priv = dev->dev_private;
1098         unsigned long flags;
1099
1100         if (!dev->irq_enabled)
1101                 return;
1102
1103         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1104         if (--ring->irq_refcount == 0) {
1105                 I915_WRITE_IMR(ring, ~0);
1106                 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1107         }
1108         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1109 }
1110
1111 static bool
1112 gen8_ring_get_irq(struct intel_ring_buffer *ring)
1113 {
1114         struct drm_device *dev = ring->dev;
1115         struct drm_i915_private *dev_priv = dev->dev_private;
1116         unsigned long flags;
1117
1118         if (!dev->irq_enabled)
1119                 return false;
1120
1121         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1122         if (ring->irq_refcount++ == 0) {
1123                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1124                         I915_WRITE_IMR(ring,
1125                                        ~(ring->irq_enable_mask |
1126                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1127                 } else {
1128                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1129                 }
1130                 POSTING_READ(RING_IMR(ring->mmio_base));
1131         }
1132         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1133
1134         return true;
1135 }
1136
1137 static void
1138 gen8_ring_put_irq(struct intel_ring_buffer *ring)
1139 {
1140         struct drm_device *dev = ring->dev;
1141         struct drm_i915_private *dev_priv = dev->dev_private;
1142         unsigned long flags;
1143
1144         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1145         if (--ring->irq_refcount == 0) {
1146                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1147                         I915_WRITE_IMR(ring,
1148                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1149                 } else {
1150                         I915_WRITE_IMR(ring, ~0);
1151                 }
1152                 POSTING_READ(RING_IMR(ring->mmio_base));
1153         }
1154         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1155 }
1156
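/*
 * gen4+ batch dispatch: a single MI_BATCH_BUFFER_START pointing at the batch
 * in the GTT, marked non-secure unless I915_DISPATCH_SECURE was requested.
 */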
1157 static int
1158 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1159                          u32 offset, u32 length,
1160                          unsigned flags)
1161 {
1162         int ret;
1163
1164         ret = intel_ring_begin(ring, 2);
1165         if (ret)
1166                 return ret;
1167
1168         intel_ring_emit(ring,
1169                         MI_BATCH_BUFFER_START |
1170                         MI_BATCH_GTT |
1171                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1172         intel_ring_emit(ring, offset);
1173         intel_ring_advance(ring);
1174
1175         return 0;
1176 }
1177
1178 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1179 #define I830_BATCH_LIMIT (256*1024)
1180 static int
1181 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1182                                 u32 offset, u32 len,
1183                                 unsigned flags)
1184 {
1185         int ret;
1186
1187         if (flags & I915_DISPATCH_PINNED) {
1188                 ret = intel_ring_begin(ring, 4);
1189                 if (ret)
1190                         return ret;
1191
1192                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1193                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1194                 intel_ring_emit(ring, offset + len - 8);
1195                 intel_ring_emit(ring, MI_NOOP);
1196                 intel_ring_advance(ring);
1197         } else {
1198                 u32 cs_offset = ring->scratch.gtt_offset;
1199
1200                 if (len > I830_BATCH_LIMIT)
1201                         return -ENOSPC;
1202
1203                 ret = intel_ring_begin(ring, 9+3);
1204                 if (ret)
1205                         return ret;
1206                 /* Blit the batch (which now has all relocs applied) to the stable batch
1207                  * scratch bo area (so that the CS never stumbles over its tlb
1208                  * invalidation bug) ... */
1209                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1210                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1211                                 XY_SRC_COPY_BLT_WRITE_RGB);
1212                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1213                 intel_ring_emit(ring, 0);
1214                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1215                 intel_ring_emit(ring, cs_offset);
1216                 intel_ring_emit(ring, 0);
1217                 intel_ring_emit(ring, 4096);
1218                 intel_ring_emit(ring, offset);
1219                 intel_ring_emit(ring, MI_FLUSH);
1220
1221                 /* ... and execute it. */
1222                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1223                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1224                 intel_ring_emit(ring, cs_offset + len - 8);
1225                 intel_ring_advance(ring);
1226         }
1227
1228         return 0;
1229 }
1230
1231 static int
1232 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1233                          u32 offset, u32 len,
1234                          unsigned flags)
1235 {
1236         int ret;
1237
1238         ret = intel_ring_begin(ring, 2);
1239         if (ret)
1240                 return ret;
1241
1242         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1243         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1244         intel_ring_advance(ring);
1245
1246         return 0;
1247 }
1248
1249 static void cleanup_status_page(struct intel_ring_buffer *ring)
1250 {
1251         struct drm_i915_gem_object *obj;
1252
1253         obj = ring->status_page.obj;
1254         if (obj == NULL)
1255                 return;
1256
1257         kunmap(sg_page(obj->pages->sgl));
1258         i915_gem_object_unpin(obj);
1259         drm_gem_object_unreference(&obj->base);
1260         ring->status_page.obj = NULL;
1261 }
1262
1263 static int init_status_page(struct intel_ring_buffer *ring)
1264 {
1265         struct drm_device *dev = ring->dev;
1266         struct drm_i915_gem_object *obj;
1267         int ret;
1268
1269         obj = i915_gem_alloc_object(dev, 4096);
1270         if (obj == NULL) {
1271                 DRM_ERROR("Failed to allocate status page\n");
1272                 ret = -ENOMEM;
1273                 goto err;
1274         }
1275
1276         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1277
1278         ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
1279         if (ret != 0) {
1280                 goto err_unref;
1281         }
1282
1283         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1284         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1285         if (ring->status_page.page_addr == NULL) {
1286                 ret = -ENOMEM;
1287                 goto err_unpin;
1288         }
1289         ring->status_page.obj = obj;
1290         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1291
1292         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1293                         ring->name, ring->status_page.gfx_addr);
1294
1295         return 0;
1296
1297 err_unpin:
1298         i915_gem_object_unpin(obj);
1299 err_unref:
1300         drm_gem_object_unreference(&obj->base);
1301 err:
1302         return ret;
1303 }
1304
1305 static int init_phys_status_page(struct intel_ring_buffer *ring)
1306 {
1307         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1308
1309         if (!dev_priv->status_page_dmah) {
1310                 dev_priv->status_page_dmah =
1311                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1312                 if (!dev_priv->status_page_dmah)
1313                         return -ENOMEM;
1314         }
1315
1316         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1317         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1318
1319         return 0;
1320 }
1321
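/*
 * Generic ring construction: set up the status page, allocate the ring
 * object (preferring stolen memory on non-LLC platforms), pin it, map it
 * write-combined through the GTT aperture and call the ring's init hook.
 */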
1322 static int intel_init_ring_buffer(struct drm_device *dev,
1323                                   struct intel_ring_buffer *ring)
1324 {
1325         struct drm_i915_gem_object *obj;
1326         struct drm_i915_private *dev_priv = dev->dev_private;
1327         int ret;
1328
1329         ring->dev = dev;
1330         INIT_LIST_HEAD(&ring->active_list);
1331         INIT_LIST_HEAD(&ring->request_list);
1332         ring->size = 32 * PAGE_SIZE;
1333         memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1334
1335         init_waitqueue_head(&ring->irq_queue);
1336
1337         if (I915_NEED_GFX_HWS(dev)) {
1338                 ret = init_status_page(ring);
1339                 if (ret)
1340                         return ret;
1341         } else {
1342                 BUG_ON(ring->id != RCS);
1343                 ret = init_phys_status_page(ring);
1344                 if (ret)
1345                         return ret;
1346         }
1347
1348         obj = NULL;
1349         if (!HAS_LLC(dev))
1350                 obj = i915_gem_object_create_stolen(dev, ring->size);
1351         if (obj == NULL)
1352                 obj = i915_gem_alloc_object(dev, ring->size);
1353         if (obj == NULL) {
1354                 DRM_ERROR("Failed to allocate ringbuffer\n");
1355                 ret = -ENOMEM;
1356                 goto err_hws;
1357         }
1358
1359         ring->obj = obj;
1360
1361         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
1362         if (ret)
1363                 goto err_unref;
1364
1365         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1366         if (ret)
1367                 goto err_unpin;
1368
1369         ring->virtual_start =
1370                 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1371                            ring->size);
1372         if (ring->virtual_start == NULL) {
1373                 DRM_ERROR("Failed to map ringbuffer.\n");
1374                 ret = -EINVAL;
1375                 goto err_unpin;
1376         }
1377
1378         ret = ring->init(ring);
1379         if (ret)
1380                 goto err_unmap;
1381
1382         /* Workaround an erratum on the i830 which causes a hang if
1383          * the TAIL pointer points to within the last 2 cachelines
1384          * of the buffer.
1385          */
1386         ring->effective_size = ring->size;
1387         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1388                 ring->effective_size -= 128;
1389
1390         return 0;
1391
1392 err_unmap:
1393         iounmap(ring->virtual_start);
1394 err_unpin:
1395         i915_gem_object_unpin(obj);
1396 err_unref:
1397         drm_gem_object_unreference(&obj->base);
1398         ring->obj = NULL;
1399 err_hws:
1400         cleanup_status_page(ring);
1401         return ret;
1402 }
1403
1404 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1405 {
1406         struct drm_i915_private *dev_priv;
1407         int ret;
1408
1409         if (ring->obj == NULL)
1410                 return;
1411
1412         /* Disable the ring buffer. The ring must be idle at this point */
1413         dev_priv = ring->dev->dev_private;
1414         ret = intel_ring_idle(ring);
1415         if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1416                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1417                           ring->name, ret);
1418
1419         I915_WRITE_CTL(ring, 0);
1420
1421         iounmap(ring->virtual_start);
1422
1423         i915_gem_object_unpin(ring->obj);
1424         drm_gem_object_unreference(&ring->obj->base);
1425         ring->obj = NULL;
1426         ring->preallocated_lazy_request = NULL;
1427         ring->outstanding_lazy_seqno = 0;
1428
1429         if (ring->cleanup)
1430                 ring->cleanup(ring);
1431
1432         cleanup_status_page(ring);
1433 }
1434
1435 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1436 {
1437         int ret;
1438
1439         ret = i915_wait_seqno(ring, seqno);
1440         if (!ret)
1441                 i915_gem_retire_requests_ring(ring);
1442
1443         return ret;
1444 }
1445
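/*
 * Try to free @n bytes of ring space by retiring requests: find the oldest
 * outstanding request whose completion would release enough space, wait on
 * its seqno and then pick the new head up from last_retired_head.
 */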
1446 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1447 {
1448         struct drm_i915_gem_request *request;
1449         u32 seqno = 0;
1450         int ret;
1451
1452         i915_gem_retire_requests_ring(ring);
1453
1454         if (ring->last_retired_head != -1) {
1455                 ring->head = ring->last_retired_head;
1456                 ring->last_retired_head = -1;
1457                 ring->space = ring_space(ring);
1458                 if (ring->space >= n)
1459                         return 0;
1460         }
1461
1462         list_for_each_entry(request, &ring->request_list, list) {
1463                 int space;
1464
1465                 if (request->tail == -1)
1466                         continue;
1467
1468                 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1469                 if (space < 0)
1470                         space += ring->size;
1471                 if (space >= n) {
1472                         seqno = request->seqno;
1473                         break;
1474                 }
1475
1476                 /* Consume this request in case we need more space than
1477                  * is available and so need to prevent a race between
1478                  * updating last_retired_head and direct reads of
1479                  * I915_RING_HEAD. It also provides a nice sanity check.
1480                  */
1481                 request->tail = -1;
1482         }
1483
1484         if (seqno == 0)
1485                 return -ENOSPC;
1486
1487         ret = intel_ring_wait_seqno(ring, seqno);
1488         if (ret)
1489                 return ret;
1490
1491         if (WARN_ON(ring->last_retired_head == -1))
1492                 return -ENOSPC;
1493
1494         ring->head = ring->last_retired_head;
1495         ring->last_retired_head = -1;
1496         ring->space = ring_space(ring);
1497         if (WARN_ON(ring->space < n))
1498                 return -ENOSPC;
1499
1500         return 0;
1501 }
1502
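/*
 * Last-resort wait for @n bytes of ring space: retire requests first, then
 * poll the hardware HEAD for up to 60 seconds, bailing out early if the GPU
 * is declared wedged.
 */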
1503 static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1504 {
1505         struct drm_device *dev = ring->dev;
1506         struct drm_i915_private *dev_priv = dev->dev_private;
1507         unsigned long end;
1508         int ret;
1509
1510         ret = intel_ring_wait_request(ring, n);
1511         if (ret != -ENOSPC)
1512                 return ret;
1513
1514         /* force the tail write in case we have been skipping them */
1515         __intel_ring_advance(ring);
1516
1517         trace_i915_ring_wait_begin(ring);
1518         /* With GEM the hangcheck timer should kick us out of the loop;
1519          * leaving it early runs the risk of corrupting GEM state (due
1520          * to running on almost untested codepaths). But on resume
1521          * timers don't work yet, so prevent a complete hang in that
1522          * case by choosing an insanely large timeout. */
1523         end = jiffies + 60 * HZ;
1524
1525         do {
1526                 ring->head = I915_READ_HEAD(ring);
1527                 ring->space = ring_space(ring);
1528                 if (ring->space >= n) {
1529                         trace_i915_ring_wait_end(ring);
1530                         return 0;
1531                 }
1532
1533                 if (dev->primary->master) {
1534                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1535                         if (master_priv->sarea_priv)
1536                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1537                 }
1538
1539                 msleep(1);
1540
1541                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1542                                            dev_priv->mm.interruptible);
1543                 if (ret)
1544                         return ret;
1545         } while (!time_after(jiffies, end));
1546         trace_i915_ring_wait_end(ring);
1547         return -EBUSY;
1548 }
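/*
 * Note on the wait strategy above: once retiring requests cannot free enough
 * space, we fall back to polling I915_RING_HEAD roughly once per millisecond
 * for up to 60 seconds, bailing out early if the GPU has been declared
 * wedged.  The hangcheck/reset machinery is expected to break a genuinely
 * stuck ring long before that timeout expires.
 */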
1549
1550 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1551 {
1552         uint32_t __iomem *virt;
1553         int rem = ring->size - ring->tail;
1554
1555         if (ring->space < rem) {
1556                 int ret = ring_wait_for_space(ring, rem);
1557                 if (ret)
1558                         return ret;
1559         }
1560
1561         virt = ring->virtual_start + ring->tail;
1562         rem /= 4;
1563         while (rem--)
1564                 iowrite32(MI_NOOP, virt++);
1565
1566         ring->tail = 0;
1567         ring->space = ring_space(ring);
1568
1569         return 0;
1570 }
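/*
 * Wrap example (illustrative only): with ring->size = 4096 and
 * ring->tail = 4080, rem = 16 bytes, so four MI_NOOP dwords are written to
 * pad out the end of the buffer before the tail is reset to 0.  The caller
 * can then emit its commands contiguously from the start of the ring instead
 * of splitting them across the wrap point.
 */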
1571
1572 int intel_ring_idle(struct intel_ring_buffer *ring)
1573 {
1574         u32 seqno;
1575         int ret;
1576
1577         /* We need to add any requests required to flush the objects and ring */
1578         if (ring->outstanding_lazy_seqno) {
1579                 ret = i915_add_request(ring, NULL);
1580                 if (ret)
1581                         return ret;
1582         }
1583
1584         /* Wait upon the last request to be completed */
1585         if (list_empty(&ring->request_list))
1586                 return 0;
1587
1588         seqno = list_entry(ring->request_list.prev,
1589                            struct drm_i915_gem_request,
1590                            list)->seqno;
1591
1592         return i915_wait_seqno(ring, seqno);
1593 }
1594
1595 static int
1596 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1597 {
1598         if (ring->outstanding_lazy_seqno)
1599                 return 0;
1600
1601         if (ring->preallocated_lazy_request == NULL) {
1602                 struct drm_i915_gem_request *request;
1603
1604                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1605                 if (request == NULL)
1606                         return -ENOMEM;
1607
1608                 ring->preallocated_lazy_request = request;
1609         }
1610
1611         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1612 }
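/*
 * The request struct is allocated here, before any ring space is reserved,
 * the idea being that the eventual i915_add_request() cannot then fail with
 * -ENOMEM after commands have already been written into the ring.
 */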
1613
1614 static int __intel_ring_begin(struct intel_ring_buffer *ring,
1615                               int bytes)
1616 {
1617         int ret;
1618
1619         if (unlikely(ring->tail + bytes > ring->effective_size)) {
1620                 ret = intel_wrap_ring_buffer(ring);
1621                 if (unlikely(ret))
1622                         return ret;
1623         }
1624
1625         if (unlikely(ring->space < bytes)) {
1626                 ret = ring_wait_for_space(ring, bytes);
1627                 if (unlikely(ret))
1628                         return ret;
1629         }
1630
1631         ring->space -= bytes;
1632         return 0;
1633 }
1634
1635 int intel_ring_begin(struct intel_ring_buffer *ring,
1636                      int num_dwords)
1637 {
1638         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1639         int ret;
1640
1641         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1642                                    dev_priv->mm.interruptible);
1643         if (ret)
1644                 return ret;
1645
1646         /* Preallocate the olr (outstanding lazy request) before touching the ring */
1647         ret = intel_ring_alloc_seqno(ring);
1648         if (ret)
1649                 return ret;
1650
1651         return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
1652 }
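/*
 * Minimal usage sketch (illustrative, not a real caller): commands are
 * emitted with the begin/emit/advance pattern used throughout this file,
 * where the dword count passed to intel_ring_begin() must cover every
 * intel_ring_emit() issued before intel_ring_advance():
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */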
1653
1654 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1655 {
1656         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1657
1658         BUG_ON(ring->outstanding_lazy_seqno);
1659
1660         if (INTEL_INFO(ring->dev)->gen >= 6) {
1661                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1662                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1663                 if (HAS_VEBOX(ring->dev))
1664                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1665         }
1666
1667         ring->set_seqno(ring, seqno);
1668         ring->hangcheck.seqno = seqno;
1669 }
1670
1671 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1672                                      u32 value)
1673 {
1674         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1675
1676         /* Every tail move must follow the sequence below */
1677
1678         /* Disable notification that the ring is IDLE. The GT
1679          * will then assume that it is busy and bring it out of rc6.
1680          */
1681         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1682                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1683
1684         /* Clear the context id. Here be magic! */
1685         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1686
1687         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1688         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1689                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1690                      50))
1691                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1692
1693         /* Now that the ring is fully powered up, update the tail */
1694         I915_WRITE_TAIL(ring, value);
1695         POSTING_READ(RING_TAIL(ring->mmio_base));
1696
1697         /* Let the ring send IDLE messages to the GT again,
1698          * and so let it sleep to conserve power when idle.
1699          */
1700         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1701                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1702 }
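/*
 * GEN6_BSD_SLEEP_PSMI_CONTROL is a masked register: the upper 16 bits of the
 * value written select which of the lower 16 bits actually get updated,
 * which is what the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers
 * encode.  That allows the sleep-message bit to be toggled above without a
 * read-modify-write cycle on the register.
 */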
1703
1704 static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1705                                u32 invalidate, u32 flush)
1706 {
1707         uint32_t cmd;
1708         int ret;
1709
1710         ret = intel_ring_begin(ring, 4);
1711         if (ret)
1712                 return ret;
1713
1714         cmd = MI_FLUSH_DW;
1715         if (INTEL_INFO(ring->dev)->gen >= 8)
1716                 cmd += 1;
1717         /*
1718          * Bspec vol 1c.5 - video engine command streamer:
1719          * "If ENABLED, all TLBs will be invalidated once the flush
1720          * operation is complete. This bit is only valid when the
1721          * Post-Sync Operation field is a value of 1h or 3h."
1722          */
1723         if (invalidate & I915_GEM_GPU_DOMAINS)
1724                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1725                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1726         intel_ring_emit(ring, cmd);
1727         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1728         if (INTEL_INFO(ring->dev)->gen >= 8) {
1729                 intel_ring_emit(ring, 0); /* upper addr */
1730                 intel_ring_emit(ring, 0); /* value */
1731         } else {
1732                 intel_ring_emit(ring, 0);
1733                 intel_ring_emit(ring, MI_NOOP);
1734         }
1735         intel_ring_advance(ring);
1736         return 0;
1737 }
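/*
 * Layout note for the MI_FLUSH_DW emitted above: on gen6/7 the command is
 * four dwords (command, post-sync address, immediate value, MI_NOOP
 * padding), while on gen8 the post-sync address is 64 bits wide, so an extra
 * dword is emitted and the length field in the command dword is bumped by
 * one (the "cmd += 1" above).
 */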
1738
1739 static int
1740 gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1741                               u32 offset, u32 len,
1742                               unsigned flags)
1743 {
1744         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1745         bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1746                 !(flags & I915_DISPATCH_SECURE);
1747         int ret;
1748
1749         ret = intel_ring_begin(ring, 4);
1750         if (ret)
1751                 return ret;
1752
1753         /* FIXME(BDW): Address space and security selectors. */
1754         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8));
1755         intel_ring_emit(ring, offset);
1756         intel_ring_emit(ring, 0);
1757         intel_ring_emit(ring, MI_NOOP);
1758         intel_ring_advance(ring);
1759
1760         return 0;
1761 }
1762
1763 static int
1764 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1765                               u32 offset, u32 len,
1766                               unsigned flags)
1767 {
1768         int ret;
1769
1770         ret = intel_ring_begin(ring, 2);
1771         if (ret)
1772                 return ret;
1773
1774         intel_ring_emit(ring,
1775                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1776                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1777         /* bits 0-7 of the command dword encode its length on GEN6+ */
1778         intel_ring_emit(ring, offset);
1779         intel_ring_advance(ring);
1780
1781         return 0;
1782 }
1783
1784 static int
1785 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1786                               u32 offset, u32 len,
1787                               unsigned flags)
1788 {
1789         int ret;
1790
1791         ret = intel_ring_begin(ring, 2);
1792         if (ret)
1793                 return ret;
1794
1795         intel_ring_emit(ring,
1796                         MI_BATCH_BUFFER_START |
1797                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1798         /* bits 0-7 of the command dword encode its length on GEN6+ */
1799         intel_ring_emit(ring, offset);
1800         intel_ring_advance(ring);
1801
1802         return 0;
1803 }
1804
1805 /* Blitter support (SandyBridge+) */
1806
1807 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1808                            u32 invalidate, u32 flush)
1809 {
1810         struct drm_device *dev = ring->dev;
1811         uint32_t cmd;
1812         int ret;
1813
1814         ret = intel_ring_begin(ring, 4);
1815         if (ret)
1816                 return ret;
1817
1818         cmd = MI_FLUSH_DW;
1819         if (INTEL_INFO(ring->dev)->gen >= 8)
1820                 cmd += 1;
1821         /*
1822          * Bspec vol 1c.3 - blitter engine command streamer:
1823          * "If ENABLED, all TLBs will be invalidated once the flush
1824          * operation is complete. This bit is only valid when the
1825          * Post-Sync Operation field is a value of 1h or 3h."
1826          */
1827         if (invalidate & I915_GEM_DOMAIN_RENDER)
1828                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1829                         MI_FLUSH_DW_OP_STOREDW;
1830         intel_ring_emit(ring, cmd);
1831         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1832         if (INTEL_INFO(ring->dev)->gen >= 8) {
1833                 intel_ring_emit(ring, 0); /* upper addr */
1834                 intel_ring_emit(ring, 0); /* value */
1835         } else {
1836                 intel_ring_emit(ring, 0);
1837                 intel_ring_emit(ring, MI_NOOP);
1838         }
1839         intel_ring_advance(ring);
1840
1841         if (IS_GEN7(dev) && flush)
1842                 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
1843
1844         return 0;
1845 }
1846
1847 int intel_init_render_ring_buffer(struct drm_device *dev)
1848 {
1849         drm_i915_private_t *dev_priv = dev->dev_private;
1850         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1851
1852         ring->name = "render ring";
1853         ring->id = RCS;
1854         ring->mmio_base = RENDER_RING_BASE;
1855
1856         if (INTEL_INFO(dev)->gen >= 6) {
1857                 ring->add_request = gen6_add_request;
1858                 ring->flush = gen7_render_ring_flush;
1859                 if (INTEL_INFO(dev)->gen == 6)
1860                         ring->flush = gen6_render_ring_flush;
1861                 if (INTEL_INFO(dev)->gen >= 8) {
1862                         ring->flush = gen8_render_ring_flush;
1863                         ring->irq_get = gen8_ring_get_irq;
1864                         ring->irq_put = gen8_ring_put_irq;
1865                 } else {
1866                         ring->irq_get = gen6_ring_get_irq;
1867                         ring->irq_put = gen6_ring_put_irq;
1868                 }
1869                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1870                 ring->get_seqno = gen6_ring_get_seqno;
1871                 ring->set_seqno = ring_set_seqno;
1872                 ring->sync_to = gen6_ring_sync;
1873                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1874                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1875                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1876                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
1877                 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1878                 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1879                 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1880                 ring->signal_mbox[VECS] = GEN6_VERSYNC;
1881         } else if (IS_GEN5(dev)) {
1882                 ring->add_request = pc_render_add_request;
1883                 ring->flush = gen4_render_ring_flush;
1884                 ring->get_seqno = pc_render_get_seqno;
1885                 ring->set_seqno = pc_render_set_seqno;
1886                 ring->irq_get = gen5_ring_get_irq;
1887                 ring->irq_put = gen5_ring_put_irq;
1888                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1889                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1890         } else {
1891                 ring->add_request = i9xx_add_request;
1892                 if (INTEL_INFO(dev)->gen < 4)
1893                         ring->flush = gen2_render_ring_flush;
1894                 else
1895                         ring->flush = gen4_render_ring_flush;
1896                 ring->get_seqno = ring_get_seqno;
1897                 ring->set_seqno = ring_set_seqno;
1898                 if (IS_GEN2(dev)) {
1899                         ring->irq_get = i8xx_ring_get_irq;
1900                         ring->irq_put = i8xx_ring_put_irq;
1901                 } else {
1902                         ring->irq_get = i9xx_ring_get_irq;
1903                         ring->irq_put = i9xx_ring_put_irq;
1904                 }
1905                 ring->irq_enable_mask = I915_USER_INTERRUPT;
1906         }
1907         ring->write_tail = ring_write_tail;
1908         if (IS_HASWELL(dev))
1909                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1910         else if (IS_GEN8(dev))
1911                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1912         else if (INTEL_INFO(dev)->gen >= 6)
1913                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1914         else if (INTEL_INFO(dev)->gen >= 4)
1915                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1916         else if (IS_I830(dev) || IS_845G(dev))
1917                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1918         else
1919                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1920         ring->init = init_render_ring;
1921         ring->cleanup = render_ring_cleanup;
1922
1923         /* Workaround batchbuffer to combat CS tlb bug. */
1924         if (HAS_BROKEN_CS_TLB(dev)) {
1925                 struct drm_i915_gem_object *obj;
1926                 int ret;
1927
1928                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1929                 if (obj == NULL) {
1930                         DRM_ERROR("Failed to allocate batch bo\n");
1931                         return -ENOMEM;
1932                 }
1933
1934                 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1935                 if (ret != 0) {
1936                         drm_gem_object_unreference(&obj->base);
1937                         DRM_ERROR("Failed to pin batch bo\n");
1938                         return ret;
1939                 }
1940
1941                 ring->scratch.obj = obj;
1942                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1943         }
1944
1945         return intel_init_ring_buffer(dev, ring);
1946 }
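/*
 * As with the other intel_init_*_ring_buffer() helpers below, all the
 * per-generation behaviour (flush, add_request, seqno handling, irq get/put
 * and execbuffer dispatch) is selected here by filling in the ring's
 * function pointers; the hardware-facing setup itself is shared in
 * intel_init_ring_buffer().
 */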
1947
1948 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1949 {
1950         drm_i915_private_t *dev_priv = dev->dev_private;
1951         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1952         int ret;
1953
1954         ring->name = "render ring";
1955         ring->id = RCS;
1956         ring->mmio_base = RENDER_RING_BASE;
1957
1958         if (INTEL_INFO(dev)->gen >= 6) {
1959                 /* non-kms not supported on gen6+ */
1960                 return -ENODEV;
1961         }
1962
1963         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
1964          * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1965          * the special gen5 functions. */
1966         ring->add_request = i9xx_add_request;
1967         if (INTEL_INFO(dev)->gen < 4)
1968                 ring->flush = gen2_render_ring_flush;
1969         else
1970                 ring->flush = gen4_render_ring_flush;
1971         ring->get_seqno = ring_get_seqno;
1972         ring->set_seqno = ring_set_seqno;
1973         if (IS_GEN2(dev)) {
1974                 ring->irq_get = i8xx_ring_get_irq;
1975                 ring->irq_put = i8xx_ring_put_irq;
1976         } else {
1977                 ring->irq_get = i9xx_ring_get_irq;
1978                 ring->irq_put = i9xx_ring_put_irq;
1979         }
1980         ring->irq_enable_mask = I915_USER_INTERRUPT;
1981         ring->write_tail = ring_write_tail;
1982         if (INTEL_INFO(dev)->gen >= 4)
1983                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1984         else if (IS_I830(dev) || IS_845G(dev))
1985                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1986         else
1987                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1988         ring->init = init_render_ring;
1989         ring->cleanup = render_ring_cleanup;
1990
1991         ring->dev = dev;
1992         INIT_LIST_HEAD(&ring->active_list);
1993         INIT_LIST_HEAD(&ring->request_list);
1994
1995         ring->size = size;
1996         ring->effective_size = ring->size;
1997         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1998                 ring->effective_size -= 128;
1999
2000         ring->virtual_start = ioremap_wc(start, size);
2001         if (ring->virtual_start == NULL) {
2002                 DRM_ERROR("cannot ioremap virtual address for"
2003                           " ring buffer\n");
2004                 return -ENOMEM;
2005         }
2006
2007         if (!I915_NEED_GFX_HWS(dev)) {
2008                 ret = init_phys_status_page(ring);
2009                 if (ret)
2010                         return ret;
2011         }
2012
2013         return 0;
2014 }
2015
2016 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2017 {
2018         drm_i915_private_t *dev_priv = dev->dev_private;
2019         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
2020
2021         ring->name = "bsd ring";
2022         ring->id = VCS;
2023
2024         ring->write_tail = ring_write_tail;
2025         if (INTEL_INFO(dev)->gen >= 6) {
2026                 ring->mmio_base = GEN6_BSD_RING_BASE;
2027                 /* gen6 bsd needs a special wa for tail updates */
2028                 if (IS_GEN6(dev))
2029                         ring->write_tail = gen6_bsd_ring_write_tail;
2030                 ring->flush = gen6_bsd_ring_flush;
2031                 ring->add_request = gen6_add_request;
2032                 ring->get_seqno = gen6_ring_get_seqno;
2033                 ring->set_seqno = ring_set_seqno;
2034                 if (INTEL_INFO(dev)->gen >= 8) {
2035                         ring->irq_enable_mask =
2036                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2037                         ring->irq_get = gen8_ring_get_irq;
2038                         ring->irq_put = gen8_ring_put_irq;
2039                         ring->dispatch_execbuffer =
2040                                 gen8_ring_dispatch_execbuffer;
2041                 } else {
2042                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2043                         ring->irq_get = gen6_ring_get_irq;
2044                         ring->irq_put = gen6_ring_put_irq;
2045                         ring->dispatch_execbuffer =
2046                                 gen6_ring_dispatch_execbuffer;
2047                 }
2048                 ring->sync_to = gen6_ring_sync;
2049                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
2050                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2051                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
2052                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
2053                 ring->signal_mbox[RCS] = GEN6_RVSYNC;
2054                 ring->signal_mbox[VCS] = GEN6_NOSYNC;
2055                 ring->signal_mbox[BCS] = GEN6_BVSYNC;
2056                 ring->signal_mbox[VECS] = GEN6_VEVSYNC;
2057         } else {
2058                 ring->mmio_base = BSD_RING_BASE;
2059                 ring->flush = bsd_ring_flush;
2060                 ring->add_request = i9xx_add_request;
2061                 ring->get_seqno = ring_get_seqno;
2062                 ring->set_seqno = ring_set_seqno;
2063                 if (IS_GEN5(dev)) {
2064                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2065                         ring->irq_get = gen5_ring_get_irq;
2066                         ring->irq_put = gen5_ring_put_irq;
2067                 } else {
2068                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2069                         ring->irq_get = i9xx_ring_get_irq;
2070                         ring->irq_put = i9xx_ring_put_irq;
2071                 }
2072                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2073         }
2074         ring->init = init_ring_common;
2075
2076         return intel_init_ring_buffer(dev, ring);
2077 }
2078
2079 int intel_init_blt_ring_buffer(struct drm_device *dev)
2080 {
2081         drm_i915_private_t *dev_priv = dev->dev_private;
2082         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2083
2084         ring->name = "blitter ring";
2085         ring->id = BCS;
2086
2087         ring->mmio_base = BLT_RING_BASE;
2088         ring->write_tail = ring_write_tail;
2089         ring->flush = gen6_ring_flush;
2090         ring->add_request = gen6_add_request;
2091         ring->get_seqno = gen6_ring_get_seqno;
2092         ring->set_seqno = ring_set_seqno;
2093         if (INTEL_INFO(dev)->gen >= 8) {
2094                 ring->irq_enable_mask =
2095                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2096                 ring->irq_get = gen8_ring_get_irq;
2097                 ring->irq_put = gen8_ring_put_irq;
2098                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2099         } else {
2100                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2101                 ring->irq_get = gen6_ring_get_irq;
2102                 ring->irq_put = gen6_ring_put_irq;
2103                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2104         }
2105         ring->sync_to = gen6_ring_sync;
2106         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
2107         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
2108         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2109         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
2110         ring->signal_mbox[RCS] = GEN6_RBSYNC;
2111         ring->signal_mbox[VCS] = GEN6_VBSYNC;
2112         ring->signal_mbox[BCS] = GEN6_NOSYNC;
2113         ring->signal_mbox[VECS] = GEN6_VEBSYNC;
2114         ring->init = init_ring_common;
2115
2116         return intel_init_ring_buffer(dev, ring);
2117 }
2118
2119 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2120 {
2121         drm_i915_private_t *dev_priv = dev->dev_private;
2122         struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2123
2124         ring->name = "video enhancement ring";
2125         ring->id = VECS;
2126
2127         ring->mmio_base = VEBOX_RING_BASE;
2128         ring->write_tail = ring_write_tail;
2129         ring->flush = gen6_ring_flush;
2130         ring->add_request = gen6_add_request;
2131         ring->get_seqno = gen6_ring_get_seqno;
2132         ring->set_seqno = ring_set_seqno;
2133
2134         if (INTEL_INFO(dev)->gen >= 8) {
2135                 ring->irq_enable_mask =
2136                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2137                 ring->irq_get = gen8_ring_get_irq;
2138                 ring->irq_put = gen8_ring_put_irq;
2139                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2140         } else {
2141                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2142                 ring->irq_get = hsw_vebox_get_irq;
2143                 ring->irq_put = hsw_vebox_put_irq;
2144                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2145         }
2146         ring->sync_to = gen6_ring_sync;
2147         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
2148         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
2149         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
2150         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2151         ring->signal_mbox[RCS] = GEN6_RVESYNC;
2152         ring->signal_mbox[VCS] = GEN6_VVESYNC;
2153         ring->signal_mbox[BCS] = GEN6_BVESYNC;
2154         ring->signal_mbox[VECS] = GEN6_NOSYNC;
2155         ring->init = init_ring_common;
2156
2157         return intel_init_ring_buffer(dev, ring);
2158 }
2159
2160 int
2161 intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
2162 {
2163         int ret;
2164
2165         if (!ring->gpu_caches_dirty)
2166                 return 0;
2167
2168         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2169         if (ret)
2170                 return ret;
2171
2172         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2173
2174         ring->gpu_caches_dirty = false;
2175         return 0;
2176 }
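/*
 * gpu_caches_dirty tracks whether previously emitted commands may still have
 * data sitting in the GPU's write caches.  intel_ring_flush_all_caches()
 * only emits a flush when that is the case, while
 * intel_ring_invalidate_all_caches() below always invalidates the read
 * domains and additionally flushes the write domains when they are dirty.
 */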
2177
2178 int
2179 intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
2180 {
2181         uint32_t flush_domains;
2182         int ret;
2183
2184         flush_domains = 0;
2185         if (ring->gpu_caches_dirty)
2186                 flush_domains = I915_GEM_GPU_DOMAINS;
2187
2188         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2189         if (ret)
2190                 return ret;
2191
2192         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2193
2194         ring->gpu_caches_dirty = false;
2195         return 0;
2196 }