Merge tag 'topic/drm-misc-2016-06-01' of git://anongit.freedesktop.org/drm-intel into drm-next

First -misc pull for 4.8, with pretty much just random bits all over plus a
few more lockless gem BO patches acked/reviewed by driver maintainers.

I'm starting a bit earlier this time around because there are a few invasive
patch series to land (nonblocking atomic prep work, fence prep work,
rst/sphinx kerneldoc finally happening) and I need a baseline with all the
branches merged.
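
Most of the lockless GEM BO free conversions in this pull are one-line changes
to each driver's struct drm_driver; as a rough sketch (using a made-up "foo"
driver and the CMA helpers, not any of the drivers actually touched here), the
before/after shape looks like this:

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver foo_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	/* old hook, called with dev->struct_mutex held:
	 *	.gem_free_object = drm_gem_cma_free_object,
	 * new hook, called without struct_mutex; the free path must not
	 * depend on that lock (the CMA helper never did):
	 */
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
};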

* tag 'topic/drm-misc-2016-06-01' of git://anongit.freedesktop.org/drm-intel: (21 commits)
  drm/vc4: Use lockless gem BO free callback
  drm/vc4: Use drm_gem_object_unreference_unlocked
  drm: Initialize a linear gamma table by default
  drm/vgem: Use lockless gem BO free callback
  drm/qxl: Don't set a gamma table size
  drm/msm: Nuke dummy gamma_set/get functions
  drm/cirrus: Drop redundant gamma size check
  drm/fb-helper: Remove dead code in setcolreg
  drm/mediatek: Use lockless gem BO free callback
  drm/hisilicon: Use lockless gem BO free callback
  drm/hdlcd: Use lockless gem BO free callback
  vga_switcheroo: Support deferred probing of audio clients
  vga_switcheroo: Add helper for deferred probing
  virtio-gpu: fix output lookup
  drm/doc: Unify KMS Locking docs
  drm/atomic-helper: Do not call ->mode_fixup for CRTC which will be disabled
  Fix annoyingly awkward typo in drm_edid_load.c
  drm/doc: Drop vblank_disable_allow wording
  drm: use seqlock for vblank time/count
  drm/mm: avoid possible null pointer dereference
  ...
Dave Airlie 2016-06-02 07:50:23 +10:00
commit 65439b68bb
27 changed files with 104 additions and 216 deletions


@@ -1092,22 +1092,6 @@ int max_width, max_height;</synopsis>
operation.
</para>
</sect2>
<sect2>
<title>Locking</title>
<para>
Beside some lookup structures with their own locking (which is hidden
behind the interface functions) most of the modeset state is protected
by the <code>dev-&lt;mode_config.lock</code> mutex and additionally
per-crtc locks to allow cursor updates, pageflips and similar operations
to occur concurrently with background tasks like output detection.
Operations which cross domains like a full modeset always grab all
locks. Drivers there need to protect resources shared between crtcs with
additional locking. They also need to be careful to always grab the
relevant crtc locks if a modset functions touches crtc state, e.g. for
load detection (which does only grab the <code>mode_config.lock</code>
to allow concurrent screen updates on live crtcs).
</para>
</sect2>
</sect1>
<!-- Internals: kms initialization and cleanup -->
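
The locking rules described in the section removed above still apply even
though the text moves into the unified KMS locking docs; a minimal sketch of
the difference between a cross-CRTC operation and a per-CRTC update, assuming
a hypothetical "foo" driver and the stock drm_modeset_lock helpers:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void foo_full_modeset(struct drm_device *dev)
{
	/* Cross-domain operations grab every lock:
	 * mode_config.lock plus each crtc->mutex.
	 */
	drm_modeset_lock_all(dev);
	/* ... reconfigure shared resources across all CRTCs ... */
	drm_modeset_unlock_all(dev);
}

static void foo_cursor_update(struct drm_crtc *crtc)
{
	/* Cursor updates/pageflips only need this CRTC's lock, so they can
	 * run concurrently with background work such as output probing.
	 */
	drm_modeset_lock_crtc(crtc, NULL);
	/* ... touch only this CRTC's state ... */
	drm_modeset_unlock_crtc(crtc);
}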
@@ -2845,14 +2829,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
<para>
Drivers must initialize the vertical blanking handling core with a call to
<function>drm_vblank_init</function> in their
<methodname>load</methodname> operation. The function will set the struct
<structname>drm_device</structname>
<structfield>vblank_disable_allowed</structfield> field to 0. This will
keep vertical blanking interrupts enabled permanently until the first mode
set operation, where <structfield>vblank_disable_allowed</structfield> is
set to 1. The reason behind this is not clear. Drivers can set the field
to 1 after <function>calling drm_vblank_init</function> to make vertical
blanking interrupts dynamically managed from the beginning.
<methodname>load</methodname> operation.
</para>
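
With the vblank_disable_allowed wording gone, all a driver has to do is the
drm_vblank_init() call itself; a minimal sketch of a load hook, assuming a
hypothetical driver with FOO_NUM_CRTCS pipes:

#include <drm/drmP.h>

#define FOO_NUM_CRTCS 2	/* hypothetical CRTC count */

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	/* Allocates one struct drm_vblank_crtc per CRTC; interrupt
	 * enable/disable is managed by the core from here on.
	 */
	ret = drm_vblank_init(dev, FOO_NUM_CRTCS);
	if (ret)
		return ret;

	return 0;
}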
<para>
Vertical blanking interrupts can be enabled by the DRM core or by drivers


@@ -3854,6 +3854,9 @@ T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/devicetree/bindings/video/
F: Documentation/DocBook/gpu.*
F: include/drm/
F: include/uapi/drm/


@@ -316,7 +316,7 @@ static struct drm_driver hdlcd_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = hdlcd_enable_vblank,
.disable_vblank = hdlcd_disable_vblank,
.gem_free_object = drm_gem_cma_free_object,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,


@@ -331,9 +331,6 @@ static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
int i;
if (size != CIRRUS_LUT_SIZE)
return;
for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
cirrus_crtc->lut_r[i] = red[i];
cirrus_crtc->lut_g[i] = green[i];


@@ -414,6 +414,9 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!crtc_state->enable)
continue;
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;


@@ -5139,6 +5139,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
uint16_t *r_base, *g_base, *b_base;
int i;
crtc->gamma_size = gamma_size;
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5148,6 +5151,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
return -ENOMEM;
}
r_base = crtc->gamma_store;
g_base = r_base + gamma_size;
b_base = g_base + gamma_size;
for (i = 0; i < gamma_size; i++) {
r_base[i] = i << 8;
g_base[i] = i << 8;
b_base[i] = i << 8;
}
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
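
With the core pre-filling a linear table, a driver only has to declare its LUT
size; a hedged sketch of a CRTC init path relying on that default (hypothetical
"foo" driver, its drm_crtc_funcs assumed to be defined elsewhere):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

extern const struct drm_crtc_funcs foo_crtc_funcs;	/* hypothetical vtable */

static int foo_crtc_init(struct drm_device *dev, struct drm_crtc *crtc)
{
	int ret;

	ret = drm_crtc_init(dev, crtc, &foo_crtc_funcs);
	if (ret)
		return ret;

	/* Allocates gamma_store and seeds it with the identity curve
	 * (entry i == i << 8, the 8-bit index widened to 16 bits), so the
	 * driver no longer has to fill in its own linear LUT.
	 */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}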


@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
* by commas, search through the list looking for one that
* matches the connector.
*
* If there's one or more that don't't specify a connector, keep
* If there's one or more that doesn't specify a connector, keep
* the last one found one as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);


@@ -1042,7 +1042,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
int pindex;
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
@@ -1074,38 +1073,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
!fb_helper->funcs->gamma_get))
return -EINVAL;
pindex = regno;
WARN_ON(fb->bits_per_pixel != 8);
if (fb->bits_per_pixel == 16) {
pindex = regno << 3;
fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
if (fb->depth == 16 && regno > 63)
return -EINVAL;
if (fb->depth == 15 && regno > 31)
return -EINVAL;
if (fb->depth == 16) {
u16 r, g, b;
int i;
if (regno < 32) {
for (i = 0; i < 8; i++)
fb_helper->funcs->gamma_set(crtc, red,
green, blue, pindex + i);
}
fb_helper->funcs->gamma_get(crtc, &r,
&g, &b,
pindex >> 1);
for (i = 0; i < 4; i++)
fb_helper->funcs->gamma_set(crtc, r,
green, b,
(pindex >> 1) + i);
}
}
if (fb->depth != 16)
fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
}


@@ -42,10 +42,6 @@
#include <linux/vgaarb.h>
#include <linux/export.h>
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, pipe, count) \
((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -82,29 +78,15 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
struct timeval *t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 tslot;
assert_spin_locked(&dev->vblank_time_lock);
vblank->last = last;
/* All writers hold the spinlock, but readers are serialized by
* the latching of vblank->count below.
*/
tslot = vblank->count + vblank_count_inc;
vblanktimestamp(dev, pipe, tslot) = *t_vblank;
/*
* vblank timestamp updates are protected on the write side with
* vblank_time_lock, but on the read side done locklessly using a
* sequence-lock on the vblank counter. Ensure correct ordering using
* memory barrriers. We need the barrier both before and also after the
* counter update to synchronize with the next timestamp write.
* The read-side barriers for this are in drm_vblank_count_and_time.
*/
smp_wmb();
write_seqlock(&vblank->seqlock);
vblank->time = *t_vblank;
vblank->count += vblank_count_inc;
smp_wmb();
write_sequnlock(&vblank->seqlock);
}
/**
@@ -205,7 +187,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
const struct timeval *t_old;
u64 diff_ns;
t_old = &vblanktimestamp(dev, pipe, vblank->count);
t_old = &vblank->time;
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
/*
@@ -239,49 +221,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = 1;
}
/*
* FIMXE: Need to replace this hack with proper seqlocks.
*
* Restrict the bump of the software vblank counter to a safe maximum
* value of +1 whenever there is the possibility that concurrent readers
* of vblank timestamps could be active at the moment, as the current
* implementation of the timestamp caching and updating is not safe
* against concurrent readers for calls to store_vblank() with a bump
* of anything but +1. A bump != 1 would very likely return corrupted
* timestamps to userspace, because the same slot in the cache could
* be concurrently written by store_vblank() and read by one of those
* readers without the read-retry logic detecting the collision.
*
* Concurrent readers can exist when we are called from the
* drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
* irq callers. However, all those calls to us are happening with the
* vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
* can't increase while we are executing. Therefore a zero refcount at
* this point is safe for arbitrary counter bumps if we are called
* outside vblank irq, a non-zero count is not 100% safe. Unfortunately
* we must also accept a refcount of 1, as whenever we are called from
* drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
* we must let that one pass through in order to not lose vblank counts
* during vblank irq off - which would completely defeat the whole
* point of this routine.
*
* Whenever we are called from vblank irq, we have to assume concurrent
* readers exist or can show up any time during our execution, even if
* the refcount is currently zero, as vblank irqs are usually only
* enabled due to the presence of readers, and because when we are called
* from vblank irq we can't hold the vbl_lock to protect us from sudden
* bumps in vblank refcount. Therefore also restrict bumps to +1 when
* called from vblank irq.
*/
if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
(flags & DRM_CALLED_FROM_VBLIRQ))) {
DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
"refcount %u, vblirq %u\n", pipe, diff,
atomic_read(&vblank->refcount),
(flags & DRM_CALLED_FROM_VBLIRQ) != 0);
diff = 1;
}
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -417,6 +356,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
seqlock_init(&vblank->seqlock);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -986,25 +926,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int count = DRM_TIMESTAMP_MAXRETRIES;
u32 cur_vblank;
u32 vblank_count;
unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
/*
* Vblank timestamps are read lockless. To ensure consistency the vblank
* counter is rechecked and ordering is ensured using memory barriers.
* This works like a seqlock. The write-side barriers are in store_vblank.
*/
do {
cur_vblank = vblank->count;
smp_rmb();
*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
smp_rmb();
} while (cur_vblank != vblank->count && --count > 0);
seq = read_seqbegin(&vblank->seqlock);
vblank_count = vblank->count;
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
return cur_vblank;
return vblank_count;
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
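
The hunks above replace the hand-rolled smp_wmb()/smp_rmb() retry loop with a
standard kernel seqlock; for reference, a stripped-down sketch of the seqlock
write/read pattern being adopted (illustrative only, the names are
placeholders, not the DRM code itself):

#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(foo_lock);
static u32 foo_count;
static struct timeval foo_time;

static void foo_store(u32 count, const struct timeval *t)
{
	/* Writers are serialized and bump the sequence number around the
	 * update, which is what store_vblank() now relies on.
	 */
	write_seqlock(&foo_lock);
	foo_count = count;
	foo_time = *t;
	write_sequnlock(&foo_lock);
}

static u32 foo_load(struct timeval *t)
{
	unsigned int seq;
	u32 count;

	/* Lockless read, retried if a writer raced with us, mirroring the
	 * new drm_vblank_count_and_time().
	 */
	do {
		seq = read_seqbegin(&foo_lock);
		count = foo_count;
		*t = foo_time;
	} while (read_seqretry(&foo_lock, seq));

	return count;
}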


@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
u64 end = node->start + node->size;
u64 end;
u64 hole_start;
u64 hole_end;
BUG_ON(node == NULL);
end = node->start + node->size;
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > node->start || hole_end < end)


@@ -30,12 +30,12 @@
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
* to use ww_mutex and acquire-contexts to avoid deadlocks. But because
* to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
* For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
* For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
@@ -51,6 +51,13 @@
* ... do stuff ...
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*
* On top of of these per-object locks using &ww_mutex there's also an overall
* dev->mode_config.lock, for protecting everything else. Mostly this means
* probe state of connectors, and preventing hotplug add/removal of connectors.
*
* Finally there's a bunch of dedicated locks to protect drm core internal
* lists and lookup data structures.
*/
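
Expanded into code, the usage pattern listed in the comment above looks roughly
like this; a hedged sketch of the acquire-context retry dance, where the CRTC
and plane being locked are placeholders for whatever objects the operation
actually touches:

#include <linux/errno.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void foo_update(struct drm_crtc *crtc, struct drm_plane *plane)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (!ret)
		ret = drm_modeset_lock(&plane->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Another context holds one of our locks: drop everything,
		 * block on the contended lock, then start over.
		 */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... do stuff under the acquired locks ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}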
/**


@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc;
int i;
uint16_t *r_base, *g_base, *b_base;
/* We allocate a extra array of drm_connector pointers
* for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
r_base = gma_crtc->base.gamma_store;
g_base = r_base + 256;
b_base = g_base + 256;
for (i = 0; i < 256; i++) {
gma_crtc->lut_r[i] = i;
gma_crtc->lut_g[i] = i;
gma_crtc->lut_b[i] = i;
r_base[i] = i << 8;
g_base[i] = i << 8;
b_base[i] = i << 8;
gma_crtc->lut_adj[i] = 0;
}


@@ -173,7 +173,7 @@ static struct drm_driver kirin_drm_driver = {
.fops = &kirin_drm_fops,
.set_busid = drm_platform_set_busid,
.gem_free_object = drm_gem_cma_free_object,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,


@@ -35,11 +35,9 @@
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
@@ -1030,13 +1028,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
/*
* apple-gmux is needed on dual GPU MacBook Pro
* to probe the panel if we're the inactive GPU.
*/
if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
apple_gmux_present() && pdev != vga_default_device() &&
!vga_switcheroo_handler_flags())
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
return drm_get_pci_dev(pdev, ent, &driver);


@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
.gem_free_object = mtk_drm_gem_free_object,
.gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.dumb_map_offset = mtk_drm_gem_dumb_map_offset,


@@ -184,21 +184,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
return ret;
}
static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
u16 red, u16 green, u16 blue, int regno)
{
DBG("fbdev: set gamma");
}
static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue, int regno)
{
DBG("fbdev: get gamma");
}
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
.gamma_set = msm_crtc_fb_gamma_set,
.gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
};


@@ -22,13 +22,11 @@
* Authors: Ben Skeggs
*/
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "drmP.h"
@@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
/*
* apple-gmux is needed on dual GPU MacBook Pro
* to probe the panel if we're the inactive GPU.
*/
if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
apple_gmux_present() && pdev != vga_default_device() &&
!vga_switcheroo_handler_flags())
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* remove conflicting drivers (vesafb, efifb etc) */


@@ -730,7 +730,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
qxl_crtc->index = crtc_id;
drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
return 0;
}


@@ -34,11 +34,9 @@
#include "radeon_drv.h"
#include <drm/drm_pciids.h>
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
@@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret == -EPROBE_DEFER)
return ret;
/*
* apple-gmux is needed on dual GPU MacBook Pro
* to probe the panel if we're the inactive GPU.
*/
if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
apple_gmux_present() && pdev != vga_default_device() &&
!vga_switcheroo_handler_flags())
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* Get rid of things like offb */


@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
*
* Note that this is called with the struct_mutex held.
*/
void vc4_free_object(struct drm_gem_object *gem_bo)
{


@@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = {
#endif
.gem_create_object = vc4_create_object,
.gem_free_object = vc4_free_object,
.gem_free_object_unlocked = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,


@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
mutex_lock(&dev->struct_mutex);
for (i = 0; i < state->user_state.bo_count; i++)
drm_gem_object_unreference(state->bo[i]);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
/* Need the struct lock for drm_gem_object_unreference(). */
mutex_lock(&dev->struct_mutex);
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference(&exec->bo[i]->base);
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
kfree(exec->bo);
}
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
drm_gem_object_unreference(&bo->base.base);
drm_gem_object_unreference_unlocked(&bo->base.base);
}
mutex_unlock(&dev->struct_mutex);
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0)


@@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = {
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM,
.gem_free_object = vgem_gem_free_object,
.gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
.fops = &vgem_driver_fops,


@@ -63,11 +63,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
uint32_t handle;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
if (old_state->crtc)
output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
WARN_ON(!output);
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);


@@ -30,6 +30,7 @@
#define pr_fmt(fmt) "vga_switcheroo: " fmt
#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
*
* Register vga client (GPU). Enable vga_switcheroo if another GPU and a
* handler have already registered. The power state of the client is assumed
* to be ON.
* to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
* to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* @id: client identifier
*
* Register audio client (audio device on a GPU). The power state of the
* client is assumed to be ON.
* client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -375,6 +378,33 @@ find_active_client(struct list_head *head)
return NULL;
}
/**
* vga_switcheroo_client_probe_defer() - whether to defer probing a given client
* @pdev: client pci device
*
* Determine whether any prerequisites are not fulfilled to probe a given
* client. Drivers shall invoke this early on in their ->probe callback
* and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
* register the client ere thou hast called this.
*
* Return: %true if probing should be deferred, otherwise %false.
*/
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
{
if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
/*
* apple-gmux is needed on pre-retina MacBook Pro
* to probe the panel if pdev is the inactive GPU.
*/
if (apple_gmux_present() && pdev != vga_default_device() &&
!vgasr_priv.handler_flags)
return true;
}
return false;
}
EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
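
The i915, nouveau and radeon hunks above are the real conversions; boiled down,
a hypothetical PCI GPU driver's probe would use the new helper like this:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>

extern struct drm_driver foo_drm_driver;	/* hypothetical driver struct */

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Let vga_switcheroo decide whether a prerequisite (e.g. the
	 * apple-gmux handler on dual-GPU MacBook Pros) is still missing.
	 */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	return drm_get_pci_dev(pdev, ent, &foo_drm_driver);
}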
/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device


@@ -52,6 +52,7 @@
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -392,11 +393,6 @@ struct drm_master {
void *driver_priv;
};
/* Size of ringbuffer for vblank timestamps. Just double-buffer
* in initial implementation.
*/
#define DRM_VBLANKTIME_RBSIZE 2
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -725,10 +721,10 @@ struct drm_vblank_crtc {
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timer_list disable_timer; /* delayed disable timer */
/* vblank counter, protected by dev->vblank_time_lock for writes */
u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
seqlock_t seqlock; /* protects vblank count and time */
u32 count; /* vblank counter */
struct timeval time; /* vblank timestamp */
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */


@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}