Merge tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel...
author Dave Airlie <airlied@redhat.com>
Mon, 19 Oct 2015 23:00:01 +0000 (09:00 +1000)
committer Dave Airlie <airlied@redhat.com>
Mon, 19 Oct 2015 23:00:01 +0000 (09:00 +1000)
- dmc fixes from Animesh for deeper sleep states (not yet complete)
- piles of prep patches from Ville to make mmio functions type-safe
- more fbc work from Paulo all over
- w/a shuffling from Arun Siluvery
- first part of atomic watermark updates from Matt and Ville (later parts had to
  be dropped again unfortunately)
- lots of patches to prepare bxt dsi support (Shashank Sharma)
- userptr fixes from Chris
- audio rate interface between i915/snd_hda plus kerneldoc (Libin Yang)
- shrinker improvements and fixes (Chris Wilson)
- lots and lots of small patches all over

* tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel: (134 commits)
  drm/i915: Update DRIVER_DATE to 20151010
  drm/i915: Partial revert of atomic watermark series
  drm/i915: Early exit from semaphore_waits_for for execlist mode.
  drm/i915: Remove wrong warning from i915_gem_context_clean
  drm/i915: Determine the stolen memory base address on gen2
  drm/i915: fix FBC buffer size checks
  drm/i915: fix CFB size calculation
  drm/i915: remove pre-atomic check from SKL update_primary_plane
  drm/i915: don't allocate fbcon from stolen memory if it's too big
  Revert "drm/i915: Call encoder hotplug for init and resume cases"
  Revert "drm/i915: Add hot_plug hook for hdmi encoder"
  drm/i915: use error path
  drm/i915/irq: Fix misspelled word register in kernel-doc
  drm/i915/irq: Fix kernel-doc warnings
  drm/i915: Hook up ring workaround writes at context creation time on Gen6-7.
  drm/i915: Don't warn if the workaround list is empty.
  drm/i915: Resurrect golden context on gen6/7
  drm/i915/chv: remove pre-production hardware workarounds
  drm/i915/snb: remove pre-production hardware workaround
  drm/i915/bxt: Set time interval unit to 0.833us
  ...

Documentation/DocBook/drm.tmpl
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_runtime_pm.c

index a34fa4705ebfb77e48d76f80df6d1fcad8591470,a249c73ec1b54b9d4d4cae2d3fbbe4ec764598fd..ac76a8b0baaacc9baf67cc80fbc7cf1313b88736
      <para>
        At the core of every DRM driver is a <structname>drm_driver</structname>
        structure. Drivers typically statically initialize a drm_driver structure,
 -      and then pass it to one of the <function>drm_*_init()</function> functions
 -      to register it with the DRM subsystem.
 -    </para>
 -    <para>
 -      Newer drivers that no longer require a <structname>drm_bus</structname>
 -      structure can alternatively use the low-level device initialization and
 -      registration functions such as <function>drm_dev_alloc()</function> and
 -      <function>drm_dev_register()</function> directly.
 +      and then pass it to <function>drm_dev_alloc()</function> to allocate a
 +      device instance. After the device instance is fully initialized it can be
 +      registered (which makes it accessible from userspace) using
 +      <function>drm_dev_register()</function>.
      </para>
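As a rough illustration of the flow the new text describes (not part of this patch): a hypothetical driver allocates its device instance from a statically initialized drm_driver, finishes driver-specific setup, and only then registers it. Names prefixed foo_ are placeholders, and this sketch assumes the 2015-era DRM core API where drm_dev_alloc() returns NULL on failure.

#include <drm/drmP.h>
#include <linux/platform_device.h>

/* Sketch only: foo_driver and foo_init_hw() are hypothetical, not from the patch. */
static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	/* Allocate the device instance from a statically initialized drm_driver. */
	ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (!ddev)
		return -ENOMEM;

	/* Driver-private setup (dev_private, clocks, register mapping, ...). */
	ret = foo_init_hw(ddev, pdev);
	if (ret)
		goto err_unref;

	/* Registration makes the device accessible from userspace. */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(ddev);
	return ret;
}

Teardown mirrors this: drm_dev_unregister() followed by drm_dev_unref(), as the removed text below also notes.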
      <para>
        The <structname>drm_driver</structname> structure contains static
@@@ -292,12 -296,83 +292,12 @@@ char *date;</synopsis
        </sect3>
      </sect2>
      <sect2>
 -      <title>Device Registration</title>
 -      <para>
 -        A number of functions are provided to help with device registration.
 -        The functions deal with PCI and platform devices, respectively.
 -      </para>
 -!Edrivers/gpu/drm/drm_pci.c
 -!Edrivers/gpu/drm/drm_platform.c
 -      <para>
 -        New drivers that no longer rely on the services provided by the
 -        <structname>drm_bus</structname> structure can call the low-level
 -        device registration functions directly. The
 -        <function>drm_dev_alloc()</function> function can be used to allocate
 -        and initialize a new <structname>drm_device</structname> structure.
 -        Drivers will typically want to perform some additional setup on this
 -        structure, such as allocating driver-specific data and storing a
 -        pointer to it in the DRM device's <structfield>dev_private</structfield>
 -        field. Drivers should also set the device's unique name using the
 -        <function>drm_dev_set_unique()</function> function. After it has been
 -        set up a device can be registered with the DRM subsystem by calling
 -        <function>drm_dev_register()</function>. This will cause the device to
 -        be exposed to userspace and will call the driver's
 -        <structfield>.load()</structfield> implementation. When a device is
 -        removed, the DRM device can safely be unregistered and freed by calling
 -        <function>drm_dev_unregister()</function> followed by a call to
 -        <function>drm_dev_unref()</function>.
 -      </para>
 +      <title>Device Instance and Driver Handling</title>
 +!Pdrivers/gpu/drm/drm_drv.c driver instance overview
  !Edrivers/gpu/drm/drm_drv.c
      </sect2>
      <sect2>
        <title>Driver Load</title>
 -      <para>
 -        The <methodname>load</methodname> method is the driver and device
 -        initialization entry point. The method is responsible for allocating and
 -      initializing driver private data, performing resource allocation and
 -      mapping (e.g. acquiring
 -        clocks, mapping registers or allocating command buffers), initializing
 -        the memory manager (<xref linkend="drm-memory-management"/>), installing
 -        the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
 -        vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
 -      setting (<xref linkend="drm-mode-setting"/>) and initial output
 -      configuration (<xref linkend="drm-kms-init"/>).
 -      </para>
 -      <note><para>
 -        If compatibility is a concern (e.g. with drivers converted over from
 -        User Mode Setting to Kernel Mode Setting), care must be taken to prevent
 -        device initialization and control that is incompatible with currently
 -        active userspace drivers. For instance, if user level mode setting
 -        drivers are in use, it would be problematic to perform output discovery
 -        &amp; configuration at load time. Likewise, if user-level drivers
 -        unaware of memory management are in use, memory management and command
 -        buffer setup may need to be omitted. These requirements are
 -        driver-specific, and care needs to be taken to keep both old and new
 -        applications and libraries working.
 -      </para></note>
 -      <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
 -      <para>
 -        The method takes two arguments, a pointer to the newly created
 -      <structname>drm_device</structname> and flags. The flags are used to
 -      pass the <structfield>driver_data</structfield> field of the device id
 -      corresponding to the device passed to <function>drm_*_init()</function>.
 -      Only PCI devices currently use this, USB and platform DRM drivers have
 -      their <methodname>load</methodname> method called with flags to 0.
 -      </para>
 -      <sect3>
 -        <title>Driver Private Data</title>
 -        <para>
 -          The driver private hangs off the main
 -          <structname>drm_device</structname> structure and can be used for
 -          tracking various device-specific bits of information, like register
 -          offsets, command buffer status, register state for suspend/resume, etc.
 -          At load time, a driver may simply allocate one and set
 -          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
 -          appropriately; it should be freed and
 -          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
 -          set to NULL when the driver is unloaded.
 -        </para>
 -      </sect3>
        <sect3 id="drm-irq-registration">
          <title>IRQ Registration</title>
          <para>
          </para>
        </sect3>
      </sect2>
 +    <sect2>
 +      <title>Bus-specific Device Registration and PCI Support</title>
 +      <para>
 +        A number of functions are provided to help with device registration.
 +      The functions deal with PCI and platform devices respectively and are
 +      only provided for historical reasons. These are all deprecated and
 +      shouldn't be used in new drivers. Besides that there are a few
 +      helpers for pci drivers.
 +      </para>
 +!Edrivers/gpu/drm/drm_pci.c
 +!Edrivers/gpu/drm/drm_platform.c
 +    </sect2>
    </sect1>
  
    <!-- Internals: memory management -->
@@@ -3689,7 -3752,6 +3689,7 @@@ int num_ioctls;</synopsis
          </itemizedlist>
        </para>
        </para>
 +!Edrivers/gpu/drm/drm_ioctl.c
      </sect2>
    </sect1>
    <sect1>
        <title>High Definition Audio</title>
  !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
  !Idrivers/gpu/drm/i915/intel_audio.c
+ !Iinclude/drm/i915_component.h
        </sect2>
        <sect2>
        <title>Panel Self Refresh PSR (PSR/SRD)</title>
index 84c7b6b294ee5318128a7ffd0423dca09c09b6dd,7e65015ecbee104383c531226be3f9a7ce7e9bd9..3f2a7a7c7cd41392b216ace25f7024cbb013e04c
@@@ -253,7 -253,11 +253,11 @@@ static int obj_rank_by_stolen(void *pri
        struct drm_i915_gem_object *b =
                container_of(B, struct drm_i915_gem_object, obj_exec_link);
  
-       return a->stolen->start - b->stolen->start;
+       if (a->stolen->start < b->stolen->start)
+               return -1;
+       if (a->stolen->start > b->stolen->start)
+               return 1;
+       return 0;
  }
  
  static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
@@@ -952,6 -956,7 +956,6 @@@ static int i915_gem_fence_regs_info(str
        if (ret)
                return ret;
  
 -      seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
@@@ -1308,6 -1313,10 +1312,10 @@@ static int i915_frequency_info(struct s
                seq_puts(m, "no P-state info available\n");
        }
  
+       seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
+       seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
+       seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
  out:
        intel_runtime_pm_put(dev_priv);
        return ret;
@@@ -2230,10 -2239,9 +2238,9 @@@ static void gen8_ppgtt_info(struct seq_
        for_each_ring(ring, dev_priv, unused) {
                seq_printf(m, "%s\n", ring->name);
                for (i = 0; i < 4; i++) {
-                       u32 offset = 0x270 + i * 8;
-                       u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+                       u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
                        pdp <<= 32;
-                       pdp |= I915_READ(ring->mmio_base + offset);
+                       pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
                        seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
                }
        }
@@@ -2290,18 -2298,21 +2297,21 @@@ static int i915_ppgtt_info(struct seq_f
                struct task_struct *task;
  
                task = get_pid_task(file->pid, PIDTYPE_PID);
-               if (!task)
-                       return -ESRCH;
+               if (!task) {
+                       ret = -ESRCH;
+                       goto out_put;
+               }
                seq_printf(m, "\nproc: %s\n", task->comm);
                put_task_struct(task);
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }
  
+ out_put:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
  
-       return 0;
+       return ret;
  }
  
  static int count_irq_waiters(struct drm_i915_private *i915)
@@@ -2909,7 -2920,7 +2919,7 @@@ static bool cursor_active(struct drm_de
        u32 state;
  
        if (IS_845G(dev) || IS_I865G(dev))
-               state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+               state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
        else
                state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  
@@@ -3147,7 -3158,7 +3157,7 @@@ static int i915_ddb_info(struct seq_fil
                                   skl_ddb_entry_size(entry));
                }
  
-               entry = &ddb->cursor[pipe];
+               entry = &ddb->plane[pipe][PLANE_CURSOR];
                seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }
@@@ -5040,13 -5051,38 +5050,38 @@@ static void gen9_sseu_device_status(str
        }
  }
  
+ static void broadwell_sseu_device_status(struct drm_device *dev,
+                                        struct sseu_dev_status *stat)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int s;
+       u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+       stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+       if (stat->slice_total) {
+               stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
+               stat->subslice_total = stat->slice_total *
+                                      stat->subslice_per_slice;
+               stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
+               stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+               /* subtract fused off EU(s) from enabled slice(s) */
+               for (s = 0; s < stat->slice_total; s++) {
+                       u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+                       stat->eu_total -= hweight8(subslice_7eu);
+               }
+       }
+ }
  static int i915_sseu_status(struct seq_file *m, void *unused)
  {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct sseu_dev_status stat;
  
-       if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
+       if (INTEL_INFO(dev)->gen < 8)
                return -ENODEV;
  
        seq_puts(m, "SSEU Device Info\n");
        memset(&stat, 0, sizeof(stat));
        if (IS_CHERRYVIEW(dev)) {
                cherryview_sseu_device_status(dev, &stat);
+       } else if (IS_BROADWELL(dev)) {
+               broadwell_sseu_device_status(dev, &stat);
        } else if (INTEL_INFO(dev)->gen >= 9) {
                gen9_sseu_device_status(dev, &stat);
        }
index 0eda746850ef62df96fb8c12f3fa4e6457b75ad6,499060a08d25b9e3cd3cd98c1f61be8f53db752a..1e3d65743bd240075c9fd568d64e45f7bd1dd0d6
@@@ -75,7 -75,7 +75,7 @@@ static int i915_getparam(struct drm_dev
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
 -              value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 +              value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
        return 0;
  }
  
 -static int i915_setparam(struct drm_device *dev, void *data,
 -                       struct drm_file *file_priv)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      drm_i915_setparam_t *param = data;
 -
 -      switch (param->param) {
 -      case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
 -      case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
 -      case I915_SETPARAM_ALLOW_BATCHBUFFER:
 -              /* Reject all old ums/dri params. */
 -              return -ENODEV;
 -
 -      case I915_SETPARAM_NUM_USED_FENCES:
 -              if (param->value > dev_priv->num_fence_regs ||
 -                  param->value < 0)
 -                      return -EINVAL;
 -              /* Userspace can use first N regs */
 -              dev_priv->fence_reg_start = param->value;
 -              break;
 -      default:
 -              DRM_DEBUG_DRIVER("unknown parameter %d\n",
 -                                      param->param);
 -              return -EINVAL;
 -      }
 -
 -      return 0;
 -}
 -
  static int i915_get_bridge_dev(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -673,6 -702,82 +673,82 @@@ static void gen9_sseu_info_init(struct 
        info->has_eu_pg = (info->eu_per_subslice > 2);
  }
  
+ static void broadwell_sseu_info_init(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       const int s_max = 3, ss_max = 3, eu_max = 8;
+       int s, ss;
+       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+                        (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+                        (32 - GEN8_EU_DIS1_S2_SHIFT));
+       info = (struct intel_device_info *)&dev_priv->info;
+       info->slice_total = hweight32(s_enable);
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+               for (ss = 0; ss < ss_max; ss++) {
+                       u32 n_disabled;
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (eu_max - n_disabled == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+                       info->eu_total += eu_max - n_disabled;
+               }
+       }
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       info->has_slice_pg = (info->slice_total > 1);
+       info->has_subslice_pg = 0;
+       info->has_eu_pg = 0;
+ }
  /*
   * Determine various intel_device_info fields at runtime.
   *
@@@ -743,6 -848,8 +819,8 @@@ static void intel_device_info_runtime_i
        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
+       else if (IS_BROADWELL(dev))
+               broadwell_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);
  
@@@ -818,6 -925,7 +896,7 @@@ int i915_driver_load(struct drm_device 
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->csr_lock);
+       mutex_init(&dev_priv->av_mutex);
  
        intel_pm_setup(dev);
  
@@@ -1045,12 -1153,9 +1124,9 @@@ out_freecsr
  put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
  free_priv:
-       if (dev_priv->requests)
-               kmem_cache_destroy(dev_priv->requests);
-       if (dev_priv->vmas)
-               kmem_cache_destroy(dev_priv->vmas);
-       if (dev_priv->objects)
-               kmem_cache_destroy(dev_priv->objects);
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
  }
@@@ -1141,13 -1246,9 +1217,9 @@@ int i915_driver_unload(struct drm_devic
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
  
-       if (dev_priv->requests)
-               kmem_cache_destroy(dev_priv->requests);
-       if (dev_priv->vmas)
-               kmem_cache_destroy(dev_priv->vmas);
-       if (dev_priv->objects)
-               kmem_cache_destroy(dev_priv->objects);
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);
  
@@@ -1217,7 -1318,7 +1289,7 @@@ const struct drm_ioctl_desc i915_ioctls
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
 -      DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 +      DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index 0841ca569ccb78d213d0e3739ffae0eada31d2e7,3c52a77220fa01e8d559040ca484492158bd6539..5adba06a85d1dba61f28e67755c8407b9be3bd2f
@@@ -57,7 -57,7 +57,7 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20150928"
+ #define DRIVER_DATE           "20151010"
  
  #undef WARN_ON
  /* Many gcc seem to no see through this and fall over :( */
@@@ -131,17 -131,17 +131,17 @@@ enum transcoder 
  #define transcoder_name(t) ((t) + 'A')
  
  /*
-  * This is the maximum (across all platforms) number of planes (primary +
-  * sprites) that can be active at the same time on one pipe.
-  *
-  * This value doesn't count the cursor plane.
+  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+  * number of planes per CRTC.  Not all platforms really have this many planes,
+  * which means some arrays of size I915_MAX_PLANES may have unused entries
+  * between the topmost sprite plane and the cursor plane.
   */
- #define I915_MAX_PLANES       4
  enum plane {
        PLANE_A = 0,
        PLANE_B,
        PLANE_C,
+       PLANE_CURSOR,
+       I915_MAX_PLANES,
  };
  #define plane_name(p) ((p) + 'A')
  
@@@ -628,10 -628,6 +628,6 @@@ struct drm_i915_display_funcs 
                          struct dpll *match_clock,
                          struct dpll *best_clock);
        void (*update_wm)(struct drm_crtc *crtc);
-       void (*update_sprite_wm)(struct drm_plane *plane,
-                                struct drm_crtc *crtc,
-                                uint32_t sprite_width, uint32_t sprite_height,
-                                int pixel_size, bool enable, bool scaled);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
        void (*crtc_disable)(struct drm_crtc *crtc);
        void (*audio_codec_enable)(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
-                                  struct drm_display_mode *mode);
+                                  const struct drm_display_mode *adjusted_mode);
        void (*audio_codec_disable)(struct intel_encoder *encoder);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
-       int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
-       uint32_t (*get_backlight)(struct intel_connector *connector);
-       void (*set_backlight)(struct intel_connector *connector,
-                             uint32_t level);
-       void (*disable_backlight)(struct intel_connector *connector);
-       void (*enable_backlight)(struct intel_connector *connector);
-       uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector,
-                                       uint32_t hz);
  };
  
  enum forcewake_domain_id {
@@@ -1146,7 -1133,6 +1133,6 @@@ struct intel_gen6_power_mgmt 
        u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
        u8 rp1_freq;            /* "less than" RP0 power/freqency */
        u8 rp0_freq;            /* Non-overclocked max frequency. */
-       u32 cz_freq;
  
        u8 up_threshold; /* Current %busy required to uplock */
        u8 down_threshold; /* Current %busy required to downclock */
@@@ -1588,8 -1574,7 +1574,7 @@@ static inline bool skl_ddb_entry_equal(
  struct skl_ddb_allocation {
        struct skl_ddb_entry pipe[I915_MAX_PIPES];
        struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
-       struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
-       struct skl_ddb_entry cursor[I915_MAX_PIPES];
+       struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
  };
  
  struct skl_wm_values {
        struct skl_ddb_allocation ddb;
        uint32_t wm_linetime[I915_MAX_PIPES];
        uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
-       uint32_t cursor[I915_MAX_PIPES][8];
        uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
-       uint32_t cursor_trans[I915_MAX_PIPES];
  };
  
  struct skl_wm_level {
        bool plane_en[I915_MAX_PLANES];
-       bool cursor_en;
        uint16_t plane_res_b[I915_MAX_PLANES];
        uint8_t plane_res_l[I915_MAX_PLANES];
-       uint16_t cursor_res_b;
-       uint8_t cursor_res_l;
  };
  
  /*
@@@ -1802,6 -1782,7 +1782,6 @@@ struct drm_i915_private 
        struct mutex pps_mutex;
  
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
 -      int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
  
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int cdclk_freq, max_cdclk_freq;
        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
+       unsigned int czclk_freq;
  
        /**
         * wq - Driver workqueue for GEM.
        /* hda/i915 audio component */
        struct i915_audio_component *audio_component;
        bool audio_component_registered;
+       /**
+        * av_mutex - mutex for audio/video sync
+        *
+        */
+       struct mutex av_mutex;
  
        uint32_t hw_context_size;
        struct list_head context_list;
  
        bool edp_low_vswing;
  
+       /* perform PHY state sanity checks? */
+       bool chv_phy_assert[2];
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
@@@ -2607,6 -2597,7 +2596,7 @@@ struct drm_i915_cmd_table 
  #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE               0x9c00
  #define INTEL_PCH_SPT_DEVICE_ID_TYPE          0xA100
  #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE               0x9D00
+ #define INTEL_PCH_P2X_DEVICE_ID_TYPE          0x7100
  
  #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
  #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
@@@ -2824,6 -2815,8 +2814,8 @@@ void i915_gem_vma_destroy(struct i915_v
  #define PIN_OFFSET_BIAS       (1<<3)
  #define PIN_USER      (1<<4)
  #define PIN_UPDATE    (1<<5)
+ #define PIN_ZONE_4G   (1<<6)
+ #define PIN_HIGH      (1<<7)
  #define PIN_OFFSET_MASK (~4095)
  int __must_check
  i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@@ -2839,6 -2832,11 +2831,11 @@@ i915_gem_object_ggtt_pin(struct drm_i91
  int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags);
  int __must_check i915_vma_unbind(struct i915_vma *vma);
+ /*
+  * BEWARE: Do not use the function below unless you can _absolutely_
+  * _guarantee_ VMA in question is _not in use_ anywhere.
+  */
+ int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
  void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
  void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
@@@ -3167,7 -3165,6 +3164,6 @@@ int __must_check i915_gem_evict_somethi
                                          unsigned long end,
                                          unsigned flags);
  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
- int i915_gem_evict_everything(struct drm_device *dev);
  
  /* belongs in i915_gem_gtt.h */
  static inline void i915_gem_chipset_flush(struct drm_device *dev)
@@@ -3198,11 -3195,12 +3194,12 @@@ i915_gem_object_create_stolen_for_preal
  
  /* i915_gem_shrinker.c */
  unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
-                             long target,
+                             unsigned long target,
                              unsigned flags);
  #define I915_SHRINK_PURGEABLE 0x1
  #define I915_SHRINK_UNBOUND 0x2
  #define I915_SHRINK_BOUND 0x4
+ #define I915_SHRINK_ACTIVE 0x8
  unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
  void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
  
index ab80f7370ab7f2f62a0da9fa06c7ea536a598364,1cbfd5b83135500e145a5a840d03e36a573a2139..40a10b25956c2a26fc75500d2e10a77b639e43fb
@@@ -59,19 -59,19 +59,19 @@@ static void i965_write_fence_reg(struc
                                 struct drm_i915_gem_object *obj)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int fence_reg;
+       int fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
  
        if (INTEL_INFO(dev)->gen >= 6) {
-               fence_reg = FENCE_REG_SANDYBRIDGE_0;
-               fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+               fence_reg_lo = FENCE_REG_GEN6_LO(reg);
+               fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+               fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
        } else {
-               fence_reg = FENCE_REG_965_0;
+               fence_reg_lo = FENCE_REG_965_LO(reg);
+               fence_reg_hi = FENCE_REG_965_HI(reg);
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
  
-       fence_reg += reg * 8;
        /* To w/a incoherency with non-atomic 64-bit register updates,
         * we split the 64-bit update into two 32-bit writes. In order
         * for a partial fence not to be evaluated between writes, we
@@@ -81,8 -81,8 +81,8 @@@
         * For extra levels of paranoia, we make sure each step lands
         * before applying the next step.
         */
-       I915_WRITE(fence_reg, 0);
-       POSTING_READ(fence_reg);
+       I915_WRITE(fence_reg_lo, 0);
+       POSTING_READ(fence_reg_lo);
  
        if (obj) {
                u32 size = i915_gem_obj_ggtt_size(obj);
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;
  
-               I915_WRITE(fence_reg + 4, val >> 32);
-               POSTING_READ(fence_reg + 4);
+               I915_WRITE(fence_reg_hi, val >> 32);
+               POSTING_READ(fence_reg_hi);
  
-               I915_WRITE(fence_reg + 0, val);
-               POSTING_READ(fence_reg);
+               I915_WRITE(fence_reg_lo, val);
+               POSTING_READ(fence_reg_lo);
        } else {
-               I915_WRITE(fence_reg + 4, 0);
-               POSTING_READ(fence_reg + 4);
+               I915_WRITE(fence_reg_hi, 0);
+               POSTING_READ(fence_reg_hi);
        }
  }
  
@@@ -149,13 -149,8 +149,8 @@@ static void i915_write_fence_reg(struc
        } else
                val = 0;
  
-       if (reg < 8)
-               reg = FENCE_REG_830_0 + reg * 4;
-       else
-               reg = FENCE_REG_945_8 + (reg - 8) * 4;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(FENCE_REG(reg), val);
+       POSTING_READ(FENCE_REG(reg));
  }
  
  static void i830_write_fence_reg(struct drm_device *dev, int reg,
        } else
                val = 0;
  
-       I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-       POSTING_READ(FENCE_REG_830_0 + reg * 4);
+       I915_WRITE(FENCE_REG(reg), val);
+       POSTING_READ(FENCE_REG(reg));
  }
  
  inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
@@@ -322,7 -317,7 +317,7 @@@ i915_find_fence_reg(struct drm_device *
  
        /* First try to find a free reg */
        avail = NULL;
 -      for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
 +      for (i = 0; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        return reg;
index 45086e15459ac14943e994c9bc65884b9a61cee8,637c13211613bc9413fa4e65b34fd0728669218d..4fb8a2f5628169cff0a76f154ef93ee0419b7897
@@@ -581,6 -581,7 +581,7 @@@ i915_disable_pipestat(struct drm_i915_p
  
  /**
   * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
+  * @dev: drm device
   */
  static void i915_enable_asle_pipestat(struct drm_device *dev)
  {
   *   of horizontal active on the first line of vertical active
   */
  
 -static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
 +static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  {
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
  /* Called from drm generic code, passed a 'crtc', which
   * we use as a pipe index
   */
 -static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 +static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
  }
  
 -static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 +static u32 gm45_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);
@@@ -767,7 -768,7 +768,7 @@@ static int __intel_get_crtc_scanline(st
        return (position + crtc->scanline_offset) % vtotal;
  }
  
 -static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 +static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
@@@ -904,27 -905,27 +905,27 @@@ int intel_get_crtc_scanline(struct inte
        return position;
  }
  
 -static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 +static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
  {
        struct drm_crtc *crtc;
  
 -      if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
 -              DRM_ERROR("Invalid crtc %d\n", pipe);
 +      if (pipe >= INTEL_INFO(dev)->num_pipes) {
 +              DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
  
        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
 -              DRM_ERROR("Invalid crtc %d\n", pipe);
 +              DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
  
        if (!crtc->hwmode.crtc_clock) {
 -              DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 +              DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
                return -EBUSY;
        }
  
@@@ -997,12 -998,16 +998,16 @@@ static bool vlv_c0_above(struct drm_i91
                         int threshold)
  {
        u64 time, c0;
+       unsigned int mul = 100;
  
        if (old->cz_clock == 0)
                return false;
  
+       if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+               mul <<= 8;
        time = now->cz_clock - old->cz_clock;
-       time *= threshold * dev_priv->mem_freq;
+       time *= threshold * dev_priv->czclk_freq;
  
        /* Workload can be split between render + media, e.g. SwapBuffers
         * being blitted in X after being rendered in mesa. To account for
         */
        c0 = now->render_c0 - old->render_c0;
        c0 += now->media_c0 - old->media_c0;
-       c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
+       c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
  
        return c0 >= time;
  }
@@@ -2388,6 -2393,7 +2393,7 @@@ static void i915_error_wake_up(struct d
  
  /**
   * i915_reset_and_wakeup - do process context error handling work
+  * @dev: drm device
   *
   * Fire an error uevent so userspace can see that a hang or error
   * was detected.
@@@ -2565,7 -2571,7 +2571,7 @@@ static void i915_report_and_clear_eir(s
   * i915_handle_error - handle a gpu error
   * @dev: drm device
   *
-  * Do some basic checking of regsiter state at error time and
+  * Do some basic checking of register state at error time and
   * dump it to the syslog.  Also call i915_capture_error_state() to make
   * sure we get a record and make it available in debugfs.  Fire a uevent
   * so userspace knows something bad happened (should trigger collection
@@@ -2611,7 -2617,7 +2617,7 @@@ void i915_handle_error(struct drm_devic
  /* Called from drm generic code, passed 'crtc' which
   * we use as a pipe index
   */
 -static int i915_enable_vblank(struct drm_device *dev, int pipe)
 +static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        return 0;
  }
  
 -static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 +static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        return 0;
  }
  
 -static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 +static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        return 0;
  }
  
 -static int gen8_enable_vblank(struct drm_device *dev, int pipe)
 +static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
  /* Called from drm generic code, passed 'crtc' which
   * we use as a pipe index
   */
 -static void i915_disable_vblank(struct drm_device *dev, int pipe)
 +static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
 -static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 +static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
 -static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 +static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
 -static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 +static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@@ -2778,6 -2784,26 +2784,26 @@@ semaphore_waits_for(struct intel_engine
        u64 offset = 0;
        int i, backwards;
  
+       /*
+        * This function does not support execlist mode - any attempt to
+        * proceed further into this function will result in a kernel panic
+        * when dereferencing ring->buffer, which is not set up in execlist
+        * mode.
+        *
+        * The correct way of doing it would be to derive the currently
+        * executing ring buffer from the current context, which is derived
+        * from the currently running request. Unfortunately, to get the
+        * current request we would have to grab the struct_mutex before doing
+        * anything else, which would be ill-advised since some other thread
+        * might have grabbed it already and managed to hang itself, causing
+        * the hang checker to deadlock.
+        *
+        * Therefore, this function does not support execlist mode in its
+        * current form. Just return NULL and move on.
+        */
+       if (ring->buffer == NULL)
+               return NULL;
        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
                return NULL;
index 7ada76c5ecc5541e53994b297dd9ce45ce2fe940,15372598b2c3d6a9aa423687f45aea6f450623ba..0639275fc47180bcab24f15badc3ac181010e9a4
@@@ -40,7 -40,7 +40,7 @@@ static bool intel_dp_mst_compute_config
        struct drm_atomic_state *state;
        int bpp, i;
        int lane_count, slots;
-       struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct drm_connector *drm_connector;
        struct intel_connector *connector, *found = NULL;
        struct drm_connector_state *connector_state;
@@@ -78,7 -78,7 +78,7 @@@
                return false;
        }
  
-       mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+       mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
  
        pipe_config->pbn = mst_pbn;
        slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
@@@ -188,7 -188,6 +188,6 @@@ static void intel_mst_pre_enable_dp(str
  
  
                intel_dp_start_link_train(intel_dp);
-               intel_dp_complete_link_train(intel_dp);
                intel_dp_stop_link_train(intel_dp);
        }
  
@@@ -459,17 -458,11 +458,17 @@@ static struct drm_connector *intel_dp_a
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
  
        drm_mode_connector_set_path_property(connector, pathprop);
 +      return connector;
 +}
 +
 +static void intel_dp_register_mst_connector(struct drm_connector *connector)
 +{
 +      struct intel_connector *intel_connector = to_intel_connector(connector);
 +      struct drm_device *dev = connector->dev;
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
        drm_connector_register(&intel_connector->base);
 -      return connector;
  }
  
  static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@@ -515,7 -508,6 +514,7 @@@ static void intel_dp_mst_hotplug(struc
  
  static struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = intel_dp_add_mst_connector,
 +      .register_connector = intel_dp_register_mst_connector,
        .destroy_connector = intel_dp_destroy_mst_connector,
        .hotplug = intel_dp_mst_hotplug,
  };
index 825fa7a8df86ec1aeaa7287c0cc88f1a4ecc0f89,1bb1c9c8126ec7be43eb106fa451addf95a2b8b8..efb704ba248b8fdc6c667f26b4c31c092610aa63
@@@ -511,16 -511,16 +511,16 @@@ void intel_lrc_irq_handler(struct intel
        status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
  
        read_pointer = ring->next_context_status_buffer;
 -      write_pointer = status_pointer & 0x07;
 +      write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
        if (read_pointer > write_pointer)
 -              write_pointer += 6;
 +              write_pointer += GEN8_CSB_ENTRIES;
  
        spin_lock(&ring->execlist_lock);
  
        while (read_pointer < write_pointer) {
                read_pointer++;
 -              status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % 6));
 -              status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % 6));
 +              status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
 +              status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
  
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
        spin_unlock(&ring->execlist_lock);
  
        WARN(submit_contexts > 2, "More than two context complete events?\n");
 -      ring->next_context_status_buffer = write_pointer % 6;
 +      ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
  
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
 -                 _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
 +                 _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
 +                               ((u32)ring->next_context_status_buffer &
 +                                GEN8_CSB_PTR_MASK) << 8));
  }
  
  static int execlists_context_queue(struct drm_i915_gem_request *request)
@@@ -904,21 -902,6 +904,6 @@@ int intel_execlists_submission(struct i
                return -EINVAL;
        }
  
-       if (args->num_cliprects != 0) {
-               DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
-               return -EINVAL;
-       } else {
-               if (args->DR4 == 0xffffffff) {
-                       DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
-                       args->DR4 = 0;
-               }
-               if (args->DR1 || args->DR4 || args->cliprects_ptr) {
-                       DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
-                       return -EINVAL;
-               }
-       }
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                DRM_DEBUG("sol reset is gen7 only\n");
                return -EINVAL;
@@@ -1479,7 -1462,6 +1464,7 @@@ static int gen8_init_common_ring(struc
  {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      u8 next_context_status_buffer_hw;
  
        lrc_setup_hardware_status_page(ring,
                                ring->default_context->engine[ring->id].state);
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
        POSTING_READ(RING_MODE_GEN7(ring));
 -      ring->next_context_status_buffer = 0;
 +
 +      /*
 +       * Instead of resetting the Context Status Buffer (CSB) read pointer to
 +       * zero, we need to read the write pointer from hardware and use its
 +       * value because "this register is power context save restored".
 +       * Effectively, these states have been observed:
 +       *
 +       *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
 +       * BDW  | CSB regs not reset       | CSB regs reset       |
 +       * CHT  | CSB regs not reset       | CSB regs not reset   |
 +       */
 +      next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
 +                                                 & GEN8_CSB_PTR_MASK);
 +
 +      /*
 +       * When the CSB registers are reset (also after power-up / gpu reset),
 +       * CSB write pointer is set to all 1's, which is not valid, use '5' in
 +       * this special case, so the first element read is CSB[0].
 +       */
 +      if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
 +              next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 +
 +      ring->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
  
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
index d194492263eb861551c3289c5b51305c26ebd9ed,0cfe4c14866a05f2ce031034878b5f0eedba1673..ec010ee74050dd1c48ac8438651baab083ff6a33
@@@ -246,8 -246,7 +246,8 @@@ static void skl_power_well_post_enable(
        }
  
        if (power_well->data == SKL_DISP_PW_1) {
 -              intel_prepare_ddi(dev);
 +              if (!dev_priv->power_domains.initializing)
 +                      intel_prepare_ddi(dev);
                gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
        }
  }
@@@ -657,9 -656,15 +657,15 @@@ static void skl_set_power_well(struct d
                }
        } else {
                if (enable_requested) {
-                       I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
-                       POSTING_READ(HSW_PWR_WELL_DRIVER);
-                       DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+                       if (IS_SKYLAKE(dev) &&
+                               (power_well->data == SKL_DISP_PW_1) &&
+                               (intel_csr_load_status_get(dev_priv) == FW_LOADED))
+                               DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
+                       else {
+                               I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
+                               POSTING_READ(HSW_PWR_WELL_DRIVER);
+                               DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+                       }
  
                        if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
                                power_well->data == SKL_DISP_PW_2) {
@@@ -988,8 -993,29 +994,29 @@@ static void assert_chv_phy_status(struc
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
+       u32 phy_status_mask = 0xffffffff;
        u32 tmp;
  
+       /*
+        * The BIOS can leave the PHY is some weird state
+        * where it doesn't fully power down some parts.
+        * Disable the asserts until the PHY has been fully
+        * reset (ie. the power well has been disabled at
+        * least once).
+        */
+       if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+                                    PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+       if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+               phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+                                    PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
        if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);
  
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }
  
+       phy_status &= phy_status_mask;
        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
-       if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10))
+       if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
                WARN(phy_status != tmp,
                     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                     tmp, phy_status, dev_priv->chv_phy_control);
@@@ -1147,6 -1175,9 +1176,9 @@@ static void chv_dpio_cmn_power_well_dis
        DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);
  
+       /* PHY is fully reset now, so we can enable the PHY state asserts */
+       dev_priv->chv_phy_assert[phy] = true;
        assert_chv_phy_status(dev_priv);
  }
  
@@@ -1156,6 -1187,16 +1188,16 @@@ static void assert_chv_phy_powergate(st
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;
  
+       /*
+        * The BIOS can leave the PHY in some weird state
+        * where it doesn't fully power down some parts.
+        * Disable the asserts until the PHY has been fully
+        * reset (ie. the power well has been disabled at
+        * least once).
+        */
+       if (!dev_priv->chv_phy_assert[phy])
+               return;
        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
@@@ -1823,7 -1864,6 +1865,6 @@@ static void intel_runtime_pm_disable(st
  
        /* Make sure we're not suspended first. */
        pm_runtime_get_sync(device);
-       pm_runtime_disable(device);
  }
  
  /**
@@@ -1912,6 -1952,10 +1953,10 @@@ static void chv_phy_control_init(struc
                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
  
                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+               dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+       } else {
+               dev_priv->chv_phy_assert[DPIO_PHY0] = true;
        }
  
        if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
  
                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+               dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+       } else {
+               dev_priv->chv_phy_assert[DPIO_PHY1] = true;
        }
  
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
@@@ -2115,8 -2163,6 +2164,6 @@@ void intel_runtime_pm_enable(struct drm
        if (!HAS_RUNTIME_PM(dev))
                return;
  
-       pm_runtime_set_active(device);
        /*
         * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
         * requirement.