Merge tag 'drm-misc-next-2018-05-15' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v4.18:

UAPI Changes:
- Fix render node number regression from control node removal.

Driver Changes:
- Small header fix for virgl, used by qemu.
- Use vm_fault_t in qxl.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Tue 15 May 2018 06:16:03 PM AEST
# gpg:                using RSA key FE558C72A67013C3
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/e63306b9-67a0-74ab-8883-08b3d9db72d2@mblankhorst.nl
commit 2045b22461
@@ -0,0 +1,28 @@
+Broadcom V3D GPU
+
+Only the Broadcom V3D 3.x and newer GPUs are covered by this binding.
+For V3D 2.x, see brcm,bcm-vc4.txt.
+
+Required properties:
+- compatible: Should be "brcm,7268-v3d" or "brcm,7278-v3d"
+- reg: Physical base addresses and lengths of the register areas
+- reg-names: Names for the register areas. The "hub", "bridge", and "core0"
+             register areas are always required. The "gca" register area
+             is required if the GCA cache controller is present.
+- interrupts: The interrupt numbers. The first interrupt is for the hub,
+              while the following interrupts are for the cores.
+              See bindings/interrupt-controller/interrupts.txt
+
+Optional properties:
+- clocks: The core clock the unit runs on
+
+v3d {
+        compatible = "brcm,7268-v3d";
+        reg = <0xf1204000 0x100>,
+              <0xf1200000 0x4000>,
+              <0xf1208000 0x4000>,
+              <0xf1204100 0x100>;
+        reg-names = "bridge", "hub", "core0", "gca";
+        interrupts = <0 78 4>,
+                     <0 77 4>;
+};
@@ -10,6 +10,7 @@ GPU Driver Documentation
    tegra
    tinydrm
    tve200
+   v3d
    vc4
    bridge/dw-hdmi
    xen-front
@@ -4786,6 +4786,14 @@ S:	Maintained
 F:	drivers/gpu/drm/omapdrm/
 F:	Documentation/devicetree/bindings/display/ti/
 
+DRM DRIVERS FOR V3D
+M:	Eric Anholt <eric@anholt.net>
+S:	Supported
+F:	drivers/gpu/drm/v3d/
+F:	include/uapi/drm/v3d_drm.h
+F:	Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+
 DRM DRIVERS FOR VC4
 M:	Eric Anholt <eric@anholt.net>
 T:	git git://github.com/anholt/linux
@@ -62,8 +62,6 @@ struct sync_pt {
 	struct rb_node node;
 };
 
 #ifdef CONFIG_SW_SYNC
-
 extern const struct file_operations sw_sync_debugfs_fops;
-
 void sync_timeline_debug_add(struct sync_timeline *obj);
@@ -72,12 +70,4 @@ void sync_file_debug_add(struct sync_file *fence);
 void sync_file_debug_remove(struct sync_file *fence);
 void sync_dump(void);
 
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_file_debug_add(fence)
-# define sync_file_debug_remove(fence)
-# define sync_dump()
-#endif
-
 #endif /* _LINUX_SYNC_H */
@@ -49,16 +49,17 @@ config DRM_DEBUG_MM
 
 	  If in doubt, say "N".
 
-config DRM_DEBUG_MM_SELFTEST
-	tristate "kselftests for DRM range manager (struct drm_mm)"
+config DRM_DEBUG_SELFTEST
+	tristate "kselftests for DRM"
 	depends on DRM
 	depends on DEBUG_KERNEL
 	select PRIME_NUMBERS
 	select DRM_LIB_RANDOM
+	select DRM_KMS_HELPER
 	default n
 	help
-	  This option provides a kernel module that can be used to test
-	  the DRM range manager (drm_mm) and its API. This option is not
+	  This option provides kernel modules that can be used to run
+	  various selftests on parts of the DRM api. This option is not
 	  useful for distributions or general kernels, but only for kernel
 	  developers working on DRM and associated drivers.
 
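An aside on the rename above: any existing .config that sets the old DRM_DEBUG_MM_SELFTEST symbol silently loses the selftests until it is updated to DRM_DEBUG_SELFTEST; the Makefile and i915 Kconfig.debug hunks further down make the matching switch.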
@@ -267,6 +268,8 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
 
 source "drivers/gpu/drm/imx/Kconfig"
 
+source "drivers/gpu/drm/v3d/Kconfig"
+
 source "drivers/gpu/drm/vc4/Kconfig"
 
 source "drivers/gpu/drm/etnaviv/Kconfig"
@@ -43,7 +43,7 @@ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
-obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
 
 obj-$(CONFIG_DRM)	+= drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
@@ -61,6 +61,7 @@ obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
 obj-$(CONFIG_DRM_I915)	+= i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_V3D)  += v3d/
 obj-$(CONFIG_DRM_VC4)	+= vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
@@ -1,5 +1,5 @@
 config DRM_I2C_ADV7511
-	tristate "AV7511 encoder"
+	tristate "ADV7511 encoder"
 	depends on OF
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
@@ -1127,7 +1127,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	}
 
 	if (adv7511->gpio_pd) {
-		mdelay(5);
+		usleep_range(5000, 6000);
 		gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
 	}
 
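The mdelay() being replaced here busy-waits the CPU; since adv7511_probe() runs in sleepable context, usleep_range(5000, 6000) lets the scheduler do other work during the roughly 5 ms power-down delay, matching the kernel's timers-howto guidance for sleeps in this range.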
@@ -1425,7 +1425,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
 {
 	struct drm_plane *plane = plane_state->plane;
 	struct drm_crtc_state *crtc_state;
 
+	/* Nothing to do for same crtc*/
+	if (plane_state->crtc == crtc)
+		return 0;
 	if (plane_state->crtc) {
 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
 						       plane_state->crtc);
@@ -766,7 +766,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
 	if (crtc_state->enable)
 		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
 
-	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
+	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
 
 	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
 
@@ -1069,7 +1069,7 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 		goto nomem;
 
 	for (i = 0; i < num_modes; i++)
-		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+		drm_property_add_enum(dev->mode_config.tv_mode_property,
 				      i, modes[i]);
 
 	dev->mode_config.tv_brightness_property =
@@ -1156,7 +1156,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_property *scaling_mode_property;
-	int i, j = 0;
+	int i;
 	const unsigned valid_scaling_mode_mask =
 		(1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1;
 
@@ -1177,7 +1177,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 		if (!(BIT(i) & scaling_mode_mask))
 			continue;
 
-		ret = drm_property_add_enum(scaling_mode_property, j++,
+		ret = drm_property_add_enum(scaling_mode_property,
 					    drm_scaling_mode_enum_list[i].type,
 					    drm_scaling_mode_enum_list[i].name);
 
@@ -1531,8 +1531,10 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
 	return connector->encoder;
 }
 
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-					 const struct drm_file *file_priv)
+static bool
+drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+			     const struct list_head *export_list,
+			     const struct drm_file *file_priv)
 {
 	/*
 	 * If user-space hasn't configured the driver to expose the stereo 3D
@@ -1540,6 +1542,23 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
 	 */
 	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
 		return false;
+	/*
+	 * If user-space hasn't configured the driver to expose the modes
+	 * with aspect-ratio, don't expose them. However if such a mode
+	 * is unique, let it be exposed, but reset the aspect-ratio flags
+	 * while preparing the list of user-modes.
+	 */
+	if (!file_priv->aspect_ratio_allowed) {
+		struct drm_display_mode *mode_itr;
+
+		list_for_each_entry(mode_itr, export_list, export_head)
+			if (drm_mode_match(mode_itr, mode,
+					   DRM_MODE_MATCH_TIMINGS |
+					   DRM_MODE_MATCH_CLOCK |
+					   DRM_MODE_MATCH_FLAGS |
+					   DRM_MODE_MATCH_3D_FLAGS))
+				return false;
+	}
 
 	return true;
 }
@@ -1559,6 +1578,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	struct drm_mode_modeinfo u_mode;
 	struct drm_mode_modeinfo __user *mode_ptr;
 	uint32_t __user *encoder_ptr;
+	LIST_HEAD(export_list);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1607,21 +1627,31 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		if (drm_mode_expose_to_userspace(mode, file_priv))
+		if (drm_mode_expose_to_userspace(mode, &export_list,
+						 file_priv)) {
+			list_add_tail(&mode->export_head, &export_list);
 			mode_count++;
+		}
 
 	/*
 	 * This ioctl is called twice, once to determine how much space is
 	 * needed, and the 2nd time to fill it.
+	 * The modes that need to be exposed to the user are maintained in the
+	 * 'export_list'. When the ioctl is called first time to determine the,
+	 * space, the export_list gets filled, to find the no.of modes. In the
+	 * 2nd time, the user modes are filled, one by one from the export_list.
 	 */
 	if ((out_resp->count_modes >= mode_count) && mode_count) {
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-		list_for_each_entry(mode, &connector->modes, head) {
-			if (!drm_mode_expose_to_userspace(mode, file_priv))
-				continue;
-
+		list_for_each_entry(mode, &export_list, export_head) {
 			drm_mode_convert_to_umode(&u_mode, mode);
+			/*
+			 * Reset aspect ratio flags of user-mode, if modes with
+			 * aspect-ratio are not supported.
+			 */
+			if (!file_priv->aspect_ratio_allowed)
+				u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
 				ret = -EFAULT;
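For illustration, a minimal user-space view of the two-pass protocol the comment above describes (a sketch with raw ioctls and no error handling; real clients normally go through libdrm's drmModeGetConnector()):

	struct drm_mode_get_connector arg = { .connector_id = connector_id };

	/* Pass 1: the kernel only reports count_modes. */
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg);

	/* Pass 2: supply a buffer sized for the reported count. */
	arg.modes_ptr = (__u64)(uintptr_t)calloc(arg.count_modes,
						 sizeof(struct drm_mode_modeinfo));
	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg);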
@@ -449,6 +449,8 @@ int drm_mode_getcrtc(struct drm_device *dev,
 			crtc_resp->mode_valid = 0;
 		}
 	}
+	if (!file_priv->aspect_ratio_allowed)
+		crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
 	drm_modeset_unlock(&crtc->mutex);
 
 	return 0;
@@ -628,6 +630,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			ret = -ENOMEM;
 			goto out;
 		}
+		if (!file_priv->aspect_ratio_allowed &&
+		    (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) {
+			DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
 
 		ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
 		if (ret) {
@@ -119,18 +119,32 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
-	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+	int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+			  DP_TRAINING_AUX_RD_MASK;
+
+	if (rd_interval > 4)
+		DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+			      rd_interval);
+
+	if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
 		udelay(100);
 	else
-		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+		mdelay(rd_interval * 4);
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
 void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
-	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+	int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+			  DP_TRAINING_AUX_RD_MASK;
+
+	if (rd_interval > 4)
+		DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+			      rd_interval);
+
+	if (rd_interval == 0)
+		udelay(400);
 	else
-		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+		mdelay(rd_interval * 4);
 }
 EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
 
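A worked example of the new masking (raw value assumed for illustration): the TRAINING_AUX_RD_INTERVAL register shares its DPCD byte with an unrelated capability flag in its top bit, so a raw read of 0x82 previously produced mdelay(0x82 * 4), roughly 520 ms; with DP_TRAINING_AUX_RD_MASK applied it yields rd_interval = 2 and an 8 ms channel-equalization wait, while a DPCD 1.4 sink always gets the 100 us clock-recovery delay regardless of rd_interval.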
@@ -99,8 +99,6 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
 		return &dev->primary;
 	case DRM_MINOR_RENDER:
 		return &dev->render;
-	case DRM_MINOR_CONTROL:
-		return &dev->control;
 	default:
 		BUG();
 	}
@@ -567,7 +565,6 @@ int drm_dev_init(struct drm_device *dev,
 err_minors:
 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
 	drm_fs_inode_free(dev->anon_inode);
 err_free:
 	mutex_destroy(&dev->master_mutex);
@@ -603,7 +600,6 @@ void drm_dev_fini(struct drm_device *dev)
 
 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
 
 	mutex_destroy(&dev->master_mutex);
 	mutex_destroy(&dev->ctxlist_mutex);
@@ -796,10 +792,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
 	mutex_lock(&drm_global_mutex);
 
-	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
-	if (ret)
-		goto err_minors;
-
 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
 	if (ret)
 		goto err_minors;
@@ -837,7 +829,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 out_unlock:
 	mutex_unlock(&drm_global_mutex);
 	return ret;
@@ -882,7 +873,6 @@ void drm_dev_unregister(struct drm_device *dev)
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 }
 EXPORT_SYMBOL(drm_dev_unregister);
 
@@ -2930,11 +2930,15 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
 static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					     unsigned int clock_tolerance)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
 		struct drm_display_mode cea_mode = edid_cea_modes[vic];
 		unsigned int clock1, clock2;
@@ -2948,7 +2952,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
 			continue;
 
 		do {
-			if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+			if (drm_mode_match(to_match, &cea_mode, match_flags))
 				return vic;
 		} while (cea_mode_alternate_timings(vic, &cea_mode));
 	}
@@ -2965,11 +2969,15 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
  */
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
 		struct drm_display_mode cea_mode = edid_cea_modes[vic];
 		unsigned int clock1, clock2;
@@ -2983,7 +2991,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 			continue;
 
 		do {
-			if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+			if (drm_mode_match(to_match, &cea_mode, match_flags))
 				return vic;
 		} while (cea_mode_alternate_timings(vic, &cea_mode));
 	}
@@ -3030,6 +3038,7 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
 static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					      unsigned int clock_tolerance)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
@@ -3047,7 +3056,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
 		    abs(to_match->clock - clock2) > clock_tolerance)
 			continue;
 
-		if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		if (drm_mode_match(to_match, hdmi_mode, match_flags))
 			return vic;
 	}
 
|
|||
*/
|
||||
static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
|
||||
{
|
||||
unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
|
||||
u8 vic;
|
||||
|
||||
if (!to_match->clock)
|
||||
|
@@ -3079,7 +3089,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
+		    drm_mode_match(to_match, hdmi_mode, match_flags))
 			return vic;
 	}
 	return 0;
@@ -4823,6 +4833,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 					 const struct drm_display_mode *mode,
 					 bool is_hdmi2_sink)
 {
+	enum hdmi_picture_aspect picture_aspect;
 	int err;
 
 	if (!frame || !mode)
@@ -4865,13 +4876,23 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	 * Populate picture aspect ratio from either
 	 * user input (if specified) or from the CEA mode list.
 	 */
-	if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
-	    mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
-		frame->picture_aspect = mode->picture_aspect_ratio;
-	else if (frame->video_code > 0)
-		frame->picture_aspect = drm_get_cea_aspect_ratio(
-						frame->video_code);
+	picture_aspect = mode->picture_aspect_ratio;
+	if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
+		picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
+
+	/*
+	 * The infoframe can't convey anything but none, 4:3
+	 * and 16:9, so if the user has asked for anything else
+	 * we can only satisfy it by specifying the right VIC.
+	 */
+	if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
+		if (picture_aspect !=
+		    drm_get_cea_aspect_ratio(frame->video_code))
+			return -EINVAL;
+		picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+	}
 
+	frame->picture_aspect = picture_aspect;
 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 	frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
 
@@ -2183,7 +2183,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
 		for (j = 0; j < i; j++) {
 			if (!enabled[j])
 				continue;
-			if (!drm_mode_equal(modes[j], modes[i]))
+			if (!drm_mode_match(modes[j], modes[i],
+					    DRM_MODE_MATCH_TIMINGS |
+					    DRM_MODE_MATCH_CLOCK |
+					    DRM_MODE_MATCH_FLAGS |
+					    DRM_MODE_MATCH_3D_FLAGS))
 				can_clone = false;
 		}
 	}
@@ -2203,7 +2207,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
 
 		fb_helper_conn = fb_helper->connector_info[i];
 		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
-			if (drm_mode_equal(mode, dmt_mode))
+			if (drm_mode_match(mode, dmt_mode,
+					   DRM_MODE_MATCH_TIMINGS |
+					   DRM_MODE_MATCH_CLOCK |
+					   DRM_MODE_MATCH_FLAGS |
+					   DRM_MODE_MATCH_3D_FLAGS))
 				modes[i] = mode;
 		}
 		if (!modes[i])
@@ -484,8 +484,7 @@ int drm_mode_getfb(struct drm_device *dev,
 	 * backwards-compatibility reasons, we cannot make GET_FB() privileged,
 	 * so just return an invalid handle for non-masters.
 	 */
-	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) &&
-	    !drm_is_control_client(file_priv)) {
+	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
 		r->handle = 0;
 		ret = 0;
 		goto out;
@@ -105,7 +105,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 		.desc = compat_ptr(v32.desc),
 	};
 	err = drm_ioctl_kernel(file, drm_version, &v,
-			       DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW);
+			       DRM_UNLOCKED|DRM_RENDER_ALLOW);
 	if (err)
 		return err;
 
@@ -885,7 +885,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
-			       DRM_CONTROL_ALLOW|DRM_UNLOCKED);
+			       DRM_UNLOCKED);
 	if (err)
 		return err;
 
@@ -324,6 +324,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			return -EINVAL;
 		file_priv->atomic = req->value;
 		file_priv->universal_planes = req->value;
+		/*
+		 * No atomic user-space blows up on aspect ratio mode bits.
+		 */
+		file_priv->aspect_ratio_allowed = req->value;
+		break;
+	case DRM_CLIENT_CAP_ASPECT_RATIO:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->aspect_ratio_allowed = req->value;
 		break;
 	default:
 		return -EINVAL;
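A minimal user-space sketch of opting in to the new cap (device path, helper name, and error handling are illustrative; older kernels reject the unknown capability with -EINVAL):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int enable_aspect_ratio_modes(const char *path)
	{
		struct drm_set_client_cap cap = {
			.capability = DRM_CLIENT_CAP_ASPECT_RATIO,
			.value = 1,
		};
		int fd = open(path, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
		/* Real clients keep fd open: the cap is per file description. */
		close(fd);
		return ret;
	}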
@@ -510,13 +519,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 
 	/* MASTER is only for master or control clients */
 	if (unlikely((flags & DRM_MASTER) &&
-		     !drm_is_current_master(file_priv) &&
-		     !drm_is_control_client(file_priv)))
-		return -EACCES;
-
-	/* Control clients must be explicitly allowed */
-	if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
-		     drm_is_control_client(file_priv)))
+		     !drm_is_current_master(file_priv)))
 		return -EACCES;
 
 	/* Render clients must be explicitly allowed */
@@ -539,7 +542,7 @@ EXPORT_SYMBOL(drm_ioctl_permit);
 /* Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
-		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -613,41 +616,41 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -665,10 +668,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
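With the control node itself removed earlier in this series, DRM_CONTROL_ALLOW can no longer have a caller, which is why these hunks simply drop the flag from every ioctl table entry rather than preserving it for compatibility.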
@@ -939,6 +939,99 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_duplicate);
 
+static bool drm_mode_match_timings(const struct drm_display_mode *mode1,
+				   const struct drm_display_mode *mode2)
+{
+	return mode1->hdisplay == mode2->hdisplay &&
+		mode1->hsync_start == mode2->hsync_start &&
+		mode1->hsync_end == mode2->hsync_end &&
+		mode1->htotal == mode2->htotal &&
+		mode1->hskew == mode2->hskew &&
+		mode1->vdisplay == mode2->vdisplay &&
+		mode1->vsync_start == mode2->vsync_start &&
+		mode1->vsync_end == mode2->vsync_end &&
+		mode1->vtotal == mode2->vtotal &&
+		mode1->vscan == mode2->vscan;
+}
+
+static bool drm_mode_match_clock(const struct drm_display_mode *mode1,
+				 const struct drm_display_mode *mode2)
+{
+	/*
+	 * do clock check convert to PICOS
+	 * so fb modes get matched the same
+	 */
+	if (mode1->clock && mode2->clock)
+		return KHZ2PICOS(mode1->clock) == KHZ2PICOS(mode2->clock);
+	else
+		return mode1->clock == mode2->clock;
+}
+
+static bool drm_mode_match_flags(const struct drm_display_mode *mode1,
+				 const struct drm_display_mode *mode2)
+{
+	return (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+		(mode2->flags & ~DRM_MODE_FLAG_3D_MASK);
+}
+
+static bool drm_mode_match_3d_flags(const struct drm_display_mode *mode1,
+				    const struct drm_display_mode *mode2)
+{
+	return (mode1->flags & DRM_MODE_FLAG_3D_MASK) ==
+		(mode2->flags & DRM_MODE_FLAG_3D_MASK);
+}
+
+static bool drm_mode_match_aspect_ratio(const struct drm_display_mode *mode1,
+					const struct drm_display_mode *mode2)
+{
+	return mode1->picture_aspect_ratio == mode2->picture_aspect_ratio;
+}
+
+/**
+ * drm_mode_match - test modes for (partial) equality
+ * @mode1: first mode
+ * @mode2: second mode
+ * @match_flags: which parts need to match (DRM_MODE_MATCH_*)
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * Returns:
+ * True if the modes are (partially) equal, false otherwise.
+ */
+bool drm_mode_match(const struct drm_display_mode *mode1,
+		    const struct drm_display_mode *mode2,
+		    unsigned int match_flags)
+{
+	if (!mode1 && !mode2)
+		return true;
+
+	if (!mode1 || !mode2)
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_TIMINGS &&
+	    !drm_mode_match_timings(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_CLOCK &&
+	    !drm_mode_match_clock(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_FLAGS &&
+	    !drm_mode_match_flags(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_3D_FLAGS &&
+	    !drm_mode_match_3d_flags(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_ASPECT_RATIO &&
+	    !drm_mode_match_aspect_ratio(mode1, mode2))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(drm_mode_match);
+
 /**
  * drm_mode_equal - test modes for equality
  * @mode1: first mode
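A sketch of how callers collapse onto the new primitive (helper name illustrative; this mirrors the drm_mode_equal_no_clocks_no_stereo() rewrite later in this diff):

	static bool modes_have_same_timings(const struct drm_display_mode *a,
					    const struct drm_display_mode *b)
	{
		/* Timings and non-3D flags must match; clock is ignored. */
		return drm_mode_match(a, b,
				      DRM_MODE_MATCH_TIMINGS |
				      DRM_MODE_MATCH_FLAGS);
	}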
@@ -949,23 +1042,15 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  * Returns:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1,
+		    const struct drm_display_mode *mode2)
 {
-	if (!mode1 && !mode2)
-		return true;
-
-	if (!mode1 || !mode2)
-		return false;
-
-	/* do clock check convert to PICOS so fb modes get matched
-	 * the same */
-	if (mode1->clock && mode2->clock) {
-		if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
-			return false;
-	} else if (mode1->clock != mode2->clock)
-		return false;
-
-	return drm_mode_equal_no_clocks(mode1, mode2);
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_CLOCK |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS|
+			      DRM_MODE_MATCH_ASPECT_RATIO);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
@@ -980,13 +1065,13 @@ EXPORT_SYMBOL(drm_mode_equal);
  * Returns:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
+			      const struct drm_display_mode *mode2)
 {
-	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
-	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
-		return false;
-
-	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS);
 }
 EXPORT_SYMBOL(drm_mode_equal_no_clocks);
 
@@ -1004,21 +1089,9 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks);
 bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
 					const struct drm_display_mode *mode2)
 {
-	if (mode1->hdisplay == mode2->hdisplay &&
-	    mode1->hsync_start == mode2->hsync_start &&
-	    mode1->hsync_end == mode2->hsync_end &&
-	    mode1->htotal == mode2->htotal &&
-	    mode1->hskew == mode2->hskew &&
-	    mode1->vdisplay == mode2->vdisplay &&
-	    mode1->vsync_start == mode2->vsync_start &&
-	    mode1->vsync_end == mode2->vsync_end &&
-	    mode1->vtotal == mode2->vtotal &&
-	    mode1->vscan == mode2->vscan &&
-	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
-	    (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
-		return true;
-
-	return false;
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS);
 }
 EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
@@ -1575,6 +1648,26 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
 	out->vrefresh = in->vrefresh;
 	out->flags = in->flags;
 	out->type = in->type;
+
+	switch (in->picture_aspect_ratio) {
+	case HDMI_PICTURE_ASPECT_4_3:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
+		break;
+	case HDMI_PICTURE_ASPECT_16_9:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
+		break;
+	case HDMI_PICTURE_ASPECT_64_27:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
+		break;
+	case HDMI_PICTURE_ASPECT_256_135:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
+		break;
+	case HDMI_PICTURE_ASPECT_RESERVED:
+	default:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
+		break;
+	}
+
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
@@ -1621,6 +1714,30 @@ int drm_mode_convert_umode(struct drm_device *dev,
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
+	/* Clearing picture aspect ratio bits from out flags,
+	 * as the aspect-ratio information is not stored in
+	 * flags for kernel-mode, but in picture_aspect_ratio.
+	 */
+	out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+	switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
+	case DRM_MODE_FLAG_PIC_AR_4_3:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_16_9:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_64_27:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_256_135:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
+		break;
+	default:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+		break;
+	}
+
 	out->status = drm_mode_validate_driver(dev, out);
 	if (out->status != MODE_OK)
 		return -EINVAL;
@@ -172,10 +172,9 @@ int drm_get_panel_orientation_quirk(int width, int height)
 		if (!bios_date)
 			continue;
 
-		for (i = 0; data->bios_dates[i]; i++) {
-			if (!strcmp(data->bios_dates[i], bios_date))
-				return data->orientation;
-		}
+		i = match_string(data->bios_dates, -1, bios_date);
+		if (i >= 0)
+			return data->orientation;
 	}
 
 	return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -409,7 +409,10 @@ void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
 
-	return dev->driver->gem_prime_vmap(obj);
+	if (dev->driver->gem_prime_vmap)
+		return dev->driver->gem_prime_vmap(obj);
+	else
+		return NULL;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 
@@ -426,7 +429,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
 
-	dev->driver->gem_prime_vunmap(obj, vaddr);
+	if (dev->driver->gem_prime_vunmap)
+		dev->driver->gem_prime_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
@@ -169,9 +169,9 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev,
 		return NULL;
 
 	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
-					    props[i].type,
-					    props[i].name);
+		ret = drm_property_add_enum(property,
+					    props[i].type,
+					    props[i].name);
 		if (ret) {
 			drm_property_destroy(dev, property);
 			return NULL;
@@ -209,7 +209,7 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 					 uint64_t supported_bits)
 {
 	struct drm_property *property;
-	int i, ret, index = 0;
+	int i, ret;
 	int num_values = hweight64(supported_bits);
 
 	flags |= DRM_MODE_PROP_BITMASK;
@@ -221,14 +221,9 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 		if (!(supported_bits & (1ULL << props[i].type)))
 			continue;
 
-		if (WARN_ON(index >= num_values)) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-
-		ret = drm_property_add_enum(property, index++,
-					    props[i].type,
-					    props[i].name);
+		ret = drm_property_add_enum(property,
+					    props[i].type,
+					    props[i].name);
 		if (ret) {
 			drm_property_destroy(dev, property);
 			return NULL;
@@ -376,7 +371,6 @@ EXPORT_SYMBOL(drm_property_create_bool);
 /**
  * drm_property_add_enum - add a possible value to an enumeration property
  * @property: enumeration property to change
- * @index: index of the new enumeration
  * @value: value of the new enumeration
  * @name: symbolic name of the new enumeration
  *
@@ -388,10 +382,11 @@ EXPORT_SYMBOL(drm_property_create_bool);
  * Returns:
  * Zero on success, error code on failure.
  */
-int drm_property_add_enum(struct drm_property *property, int index,
+int drm_property_add_enum(struct drm_property *property,
 			  uint64_t value, const char *name)
 {
 	struct drm_property_enum *prop_enum;
+	int index = 0;
 
 	if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
 		return -EINVAL;
@@ -411,8 +406,12 @@ int drm_property_add_enum(struct drm_property *property, int index,
 	list_for_each_entry(prop_enum, &property->enum_list, head) {
 		if (WARN_ON(prop_enum->value == value))
 			return -EINVAL;
+		index++;
 	}
 
+	if (WARN_ON(index >= property->num_values))
+		return -EINVAL;
+
 	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
 	if (!prop_enum)
 		return -ENOMEM;
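The caller-side effect of this signature change, sketched with illustrative names: the explicit index argument disappears, and the core now derives the index by counting the entries already on the property's enum_list:

	for (i = 0; i < num_values; i++)
		drm_property_add_enum(prop, values[i], names[i]);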
@@ -50,13 +50,25 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
 }
 EXPORT_SYMBOL(drm_rect_intersect);
 
+static u32 clip_scaled(u32 src, u32 dst, u32 clip)
+{
+	u64 tmp = mul_u32_u32(src, dst - clip);
+
+	/*
+	 * Round toward 1.0 when clipping so that we don't accidentally
+	 * change upscaling to downscaling or vice versa.
+	 */
+	if (src < (dst << 16))
+		return DIV_ROUND_UP_ULL(tmp, dst);
+	else
+		return DIV_ROUND_DOWN_ULL(tmp, dst);
+}
+
 /**
  * drm_rect_clip_scaled - perform a scaled clip operation
  * @src: source window rectangle
  * @dst: destination window rectangle
  * @clip: clip rectangle
- * @hscale: horizontal scaling factor
- * @vscale: vertical scaling factor
  *
  * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
  * same amounts multiplied by @hscale and @vscale.
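A worked example of the rounding rule above (numbers illustrative): a 3.0-wide source (3 << 16 in 16.16 fixed point) upscaled to a 7-pixel destination and clipped by 2 pixels gives tmp = (3 << 16) * 5, and 5/7 of 3.0 is not exactly representable; since src < dst << 16 (an upscale) the helper takes DIV_ROUND_UP_ULL, rounding the clipped source width toward 1.0 so that clipping can never flip an upscale into a downscale.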
@@ -66,33 +78,44 @@ EXPORT_SYMBOL(drm_rect_intersect);
  * %false otherwise
  */
 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
-			  const struct drm_rect *clip,
-			  int hscale, int vscale)
+			  const struct drm_rect *clip)
 {
 	int diff;
 
 	diff = clip->x1 - dst->x1;
 	if (diff > 0) {
-		int64_t tmp = src->x1 + (int64_t) diff * hscale;
-		src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_w = clip_scaled(drm_rect_width(src),
+					    drm_rect_width(dst), diff);
+
+		src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX);
+		dst->x1 = clip->x1;
 	}
 	diff = clip->y1 - dst->y1;
 	if (diff > 0) {
-		int64_t tmp = src->y1 + (int64_t) diff * vscale;
-		src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_h = clip_scaled(drm_rect_height(src),
+					    drm_rect_height(dst), diff);
+
+		src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX);
+		dst->y1 = clip->y1;
 	}
 	diff = dst->x2 - clip->x2;
 	if (diff > 0) {
-		int64_t tmp = src->x2 - (int64_t) diff * hscale;
-		src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_w = clip_scaled(drm_rect_width(src),
+					    drm_rect_width(dst), diff);
+
+		src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX);
+		dst->x2 = clip->x2;
 	}
 	diff = dst->y2 - clip->y2;
 	if (diff > 0) {
-		int64_t tmp = src->y2 - (int64_t) diff * vscale;
-		src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_h = clip_scaled(drm_rect_height(src),
+					    drm_rect_height(dst), diff);
+
+		src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX);
+		dst->y2 = clip->y2;
 	}
 
-	return drm_rect_intersect(dst, clip);
+	return drm_rect_visible(dst);
 }
 EXPORT_SYMBOL(drm_rect_clip_scaled);
 
@@ -106,7 +129,10 @@ static int drm_calc_scale(int src, int dst)
 	if (dst == 0)
 		return 0;
 
-	scale = src / dst;
+	if (src > (dst << 16))
+		return DIV_ROUND_UP(src, dst);
+	else
+		scale = src / dst;
 
 	return scale;
 }
@@ -121,6 +147,10 @@ static int drm_calc_scale(int src, int dst)
  * Calculate the horizontal scaling factor as
  * (@src width) / (@dst width).
  *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
  * RETURNS:
  * The horizontal scaling factor, or errno of out of limits.
  */
@@ -152,6 +182,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale);
 * Calculate the vertical scaling factor as
 * (@src height) / (@dst height).
 *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
 * RETURNS:
 * The vertical scaling factor, or errno of out of limits.
 */
@@ -189,6 +223,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale);
 * If the calculated scaling factor is above @max_vscale,
 * decrease the height of rectangle @src to compensate.
 *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
 * RETURNS:
 * The horizontal scaling factor.
 */
@@ -239,6 +277,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
 * If the calculated scaling factor is above @max_vscale,
 * decrease the height of rectangle @src to compensate.
 *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
 * RETURNS:
 * The vertical scaling factor.
 */
@@ -373,8 +415,8 @@ EXPORT_SYMBOL(drm_rect_rotate);
 * them when doing a rotatation and its inverse.
 * That is, if you do ::
 *
- *	DRM_MODE_PROP_ROTATE(&r, width, height, rotation);
- *	DRM_MODE_ROTATE_inv(&r, width, height, rotation);
+ *	drm_rect_rotate(&r, width, height, rotation);
+ *	drm_rect_rotate_inv(&r, width, height, rotation);
 *
 * you will always get back the original rectangle.
 */
@@ -331,9 +331,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
 	struct device *kdev;
 	int r;
 
-	if (minor->type == DRM_MINOR_CONTROL)
-		minor_str = "controlD%d";
-	else if (minor->type == DRM_MINOR_RENDER)
+	if (minor->type == DRM_MINOR_RENDER)
 		minor_str = "renderD%d";
 	else
 		minor_str = "card%d";
@@ -485,7 +485,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
-		drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+		drm_property_add_enum(prop, i-1, force_audio_names[i]);
 
 	dev_priv->force_audio_property = prop;
 }
@@ -514,7 +514,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
-		drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+		drm_property_add_enum(prop, i, broadcast_rgb_names[i]);
 
 	dev_priv->broadcast_rgb_property = prop;
 }
@@ -2281,7 +2281,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
 
 	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
 		drm_property_add_enum(
-				psb_intel_sdvo_connector->tv_format, i,
+				psb_intel_sdvo_connector->tv_format,
 				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
 
 	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
@@ -26,7 +26,7 @@ config DRM_I915_DEBUG
 	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	select DRM_DEBUG_MM if DRM=y
 	select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
-	select DRM_DEBUG_MM_SELFTEST
+	select DRM_DEBUG_SELFTEST
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	select DRM_I915_SELFTEST
@@ -2822,10 +2822,10 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
@@ -2779,9 +2779,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 		return false;
 
 	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
-		drm_property_add_enum(
-				intel_sdvo_connector->tv_format, i,
-				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+		drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+				      tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
 	intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
 	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
@@ -936,22 +936,12 @@ intel_check_sprite_plane(struct intel_plane *plane,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = state->base.fb;
-	int crtc_x, crtc_y;
-	unsigned int crtc_w, crtc_h;
-	uint32_t src_x, src_y, src_w, src_h;
-	struct drm_rect *src = &state->base.src;
-	struct drm_rect *dst = &state->base.dst;
-	struct drm_rect clip = {};
 	int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
-	int hscale, vscale;
 	int max_scale, min_scale;
 	bool can_scale;
 	int ret;
 	uint32_t pixel_format = 0;
 
-	*src = drm_plane_state_src(&state->base);
-	*dst = drm_plane_state_dest(&state->base);
-
 	if (!fb) {
 		state->base.visible = false;
 		return 0;
@@ -990,64 +980,19 @@ intel_check_sprite_plane(struct intel_plane *plane,
 		min_scale = plane->can_scale ? 1 : (1 << 16);
 	}
 
-	/*
-	 * FIXME the following code does a bunch of fuzzy adjustments to the
-	 * coordinates and sizes. We probably need some way to decide whether
-	 * more strict checking should be done instead.
-	 */
-	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
-			state->base.rotation);
-
-	hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
-	BUG_ON(hscale < 0);
-
-	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
-	BUG_ON(vscale < 0);
-
-	if (crtc_state->base.enable)
-		drm_mode_get_hv_timing(&crtc_state->base.mode,
-				       &clip.x2, &clip.y2);
-
-	state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
-
-	crtc_x = dst->x1;
-	crtc_y = dst->y1;
-	crtc_w = drm_rect_width(dst);
-	crtc_h = drm_rect_height(dst);
+	ret = drm_atomic_helper_check_plane_state(&state->base,
+						  &crtc_state->base,
+						  min_scale, max_scale,
+						  true, true);
+	if (ret)
+		return ret;
 
 	if (state->base.visible) {
-		/* check again in case clipping clamped the results */
-		hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
-		if (hscale < 0) {
-			DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
-			drm_rect_debug_print("src: ", src, true);
-			drm_rect_debug_print("dst: ", dst, false);
-
-			return hscale;
-		}
-
-		vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
-		if (vscale < 0) {
-			DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
-			drm_rect_debug_print("src: ", src, true);
-			drm_rect_debug_print("dst: ", dst, false);
-
-			return vscale;
-		}
-
-		/* Make the source viewport size an exact multiple of the scaling factors. */
-		drm_rect_adjust_size(src,
-				     drm_rect_width(dst) * hscale - drm_rect_width(src),
-				     drm_rect_height(dst) * vscale - drm_rect_height(src));
-
-		drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
-				    state->base.rotation);
-
-		/* sanity check to make sure the src viewport wasn't enlarged */
-		WARN_ON(src->x1 < (int) state->base.src_x ||
-			src->y1 < (int) state->base.src_y ||
-			src->x2 > (int) state->base.src_x + state->base.src_w ||
-			src->y2 > (int) state->base.src_y + state->base.src_h);
+		struct drm_rect *src = &state->base.src;
+		struct drm_rect *dst = &state->base.dst;
+		unsigned int crtc_w = drm_rect_width(dst);
+		unsigned int crtc_h = drm_rect_height(dst);
+		uint32_t src_x, src_y, src_w, src_h;
 
 		/*
 		 * Hardware doesn't handle subpixel coordinates.
@ -1060,57 +1005,38 @@ intel_check_sprite_plane(struct intel_plane *plane,
|
|||
src_y = src->y1 >> 16;
|
||||
src_h = drm_rect_height(src) >> 16;
|
||||
|
||||
if (intel_format_is_yuv(fb->format->format)) {
|
||||
src_x &= ~1;
|
||||
src_w &= ~1;
|
||||
|
||||
/*
|
||||
* Must keep src and dst the
|
||||
* same if we can't scale.
|
||||
*/
|
||||
if (!can_scale)
|
||||
crtc_w &= ~1;
|
||||
|
||||
if (crtc_w == 0)
|
||||
state->base.visible = false;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check size restrictions when scaling */
|
||||
if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
|
||||
unsigned int width_bytes;
|
||||
int cpp = fb->format->cpp[0];
|
||||
|
||||
WARN_ON(!can_scale);
|
||||
|
||||
/* FIXME interlacing min height is 6 */
|
||||
|
||||
if (crtc_w < 3 || crtc_h < 3)
|
||||
state->base.visible = false;
|
||||
|
||||
if (src_w < 3 || src_h < 3)
|
||||
state->base.visible = false;
|
||||
|
||||
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
|
||||
|
||||
if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
|
||||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
|
||||
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (state->base.visible) {
|
||||
src->x1 = src_x << 16;
|
||||
src->x2 = (src_x + src_w) << 16;
|
||||
src->y1 = src_y << 16;
|
||||
src->y2 = (src_y + src_h) << 16;
|
||||
}
|
||||
|
||||
dst->x1 = crtc_x;
|
||||
dst->x2 = crtc_x + crtc_w;
|
||||
dst->y1 = crtc_y;
|
||||
dst->y2 = crtc_y + crtc_h;
|
||||
if (intel_format_is_yuv(fb->format->format) &&
|
||||
(src_x % 2 || src_w % 2)) {
|
||||
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
|
||||
src_x, src_w);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check size restrictions when scaling */
|
||||
if (src_w != crtc_w || src_h != crtc_h) {
|
||||
unsigned int width_bytes;
|
||||
int cpp = fb->format->cpp[0];
|
||||
|
||||
WARN_ON(!can_scale);
|
||||
|
||||
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
|
||||
|
||||
/* FIXME interlacing min height is 6 */
|
||||
if (INTEL_GEN(dev_priv) < 9 && (
|
||||
src_w < 3 || src_h < 3 ||
|
||||
src_w > 2048 || src_h > 2048 ||
|
||||
crtc_w < 3 || crtc_h < 3 ||
|
||||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
|
||||
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
ret = skl_check_plane_surface(crtc_state, state);
|
||||
|
|
|
@@ -168,7 +168,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
		gpu->funcs->debugfs_init(gpu, dev->control);
	}
#endif

@@ -140,9 +140,6 @@ int msm_debugfs_late_init(struct drm_device *dev)
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}

@@ -338,11 +338,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
	if (c) { \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
		l = (list); \
		c = 0; \
		while (p && l->gen_mask) { \
			if (l->gen_mask & (1 << (gen))) { \
				drm_property_add_enum(p, c, l->type, l->name); \
				c++; \
				drm_property_add_enum(p, l->type, l->name); \
			} \
			l++; \
		} \

@@ -238,12 +238,6 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,

static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
{
#if 0
	/* The firmware uses LP DSI transactions like this to bring up
	 * the hardware, which should be faster than using I2C to then
	 * pass to the Toshiba. However, I was unable to get it to
	 * work.
	 */
	u8 msg[] = {
		reg,
		reg >> 8,
@@ -253,13 +247,7 @@ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
		val >> 24,
	};

	mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg));
#else
	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8);
	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg);
	rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8);
	rpi_touchscreen_i2c_write(ts, REG_WRITEL, val);
#endif
	mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg));

	return 0;
}

@@ -3,6 +3,7 @@ pl111_drm-y += pl111_display.o \
	pl111_versatile.o \
	pl111_drv.o

pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o

obj-$(CONFIG_DRM_PL111) += pl111_drm.o

@@ -79,6 +79,7 @@ struct pl111_drm_dev_private {
	const struct pl111_variant_data *variant;
	void (*variant_display_enable) (struct drm_device *drm, u32 format);
	void (*variant_display_disable) (struct drm_device *drm);
	bool use_device_memory;
};

int pl111_display_init(struct drm_device *dev);

@@ -60,6 +60,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -207,6 +208,24 @@ static int pl111_modeset_init(struct drm_device *dev)
	return ret;
}

static struct drm_gem_object *
pl111_gem_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct pl111_drm_dev_private *priv = dev->dev_private;

	/*
	 * When using device-specific reserved memory we can't import
	 * DMA buffers: those are passed by reference in any global
	 * memory and we can only handle a specific range of memory.
	 */
	if (priv->use_device_memory)
		return ERR_PTR(-EINVAL);

	return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
}

DEFINE_DRM_GEM_CMA_FOPS(drm_fops);

static struct drm_driver pl111_drm_driver = {
@@ -227,7 +246,7 @@ static struct drm_driver pl111_drm_driver = {
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_import_sg_table = pl111_gem_import_sg_table,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,

@@ -257,6 +276,12 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
	drm->dev_private = priv;
	priv->variant = variant;

	ret = of_reserved_mem_device_init(dev);
	if (!ret) {
		dev_info(dev, "using device-specific reserved memory\n");
		priv->use_device_memory = true;
	}

	if (of_property_read_u32(dev->of_node, "max-memory-bandwidth",
				 &priv->memory_bw)) {
		dev_info(dev, "no max memory bandwidth specified, assume unlimited\n");
@@ -275,7 +300,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
	priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "%s failed mmio\n", __func__);
		return PTR_ERR(priv->regs);
		ret = PTR_ERR(priv->regs);
		goto dev_unref;
	}

	/* This may override some variant settings */
@@ -305,11 +331,14 @@ static int pl111_amba_probe(struct amba_device *amba_dev,

dev_unref:
	drm_dev_unref(drm);
	of_reserved_mem_device_release(dev);

	return ret;
}

static int pl111_amba_remove(struct amba_device *amba_dev)
{
	struct device *dev = &amba_dev->dev;
	struct drm_device *drm = amba_get_drvdata(amba_dev);
	struct pl111_drm_dev_private *priv = drm->dev_private;

@@ -319,6 +348,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
	drm_panel_bridge_remove(priv->bridge);
	drm_mode_config_cleanup(drm);
	drm_dev_unref(drm);
	of_reserved_mem_device_release(dev);

	return 0;
}

@@ -1,12 +1,14 @@
#include <linux/amba/clcd-regs.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "pl111_versatile.h"
#include "pl111_vexpress.h"
#include "pl111_drm.h"

static struct regmap *versatile_syscon_map;
@@ -22,6 +24,7 @@ enum versatile_clcd {
	REALVIEW_CLCD_PB11MP,
	REALVIEW_CLCD_PBA8,
	REALVIEW_CLCD_PBX,
	VEXPRESS_CLCD_V2M,
};

static const struct of_device_id versatile_clcd_of_match[] = {
@@ -53,6 +56,10 @@ static const struct of_device_id versatile_clcd_of_match[] = {
		.compatible = "arm,realview-pbx-syscon",
		.data = (void *)REALVIEW_CLCD_PBX,
	},
	{
		.compatible = "arm,vexpress-muxfpga",
		.data = (void *)VEXPRESS_CLCD_V2M,
	},
	{},
};

@@ -286,12 +293,26 @@ static const struct pl111_variant_data pl111_realview = {
	.fb_bpp = 16,
};

/*
 * Versatile Express PL111 variant, again we just push the maximum
 * BPP to 16 to be able to get 1024x768 without saturating the memory
 * bus. The clockdivider also seems broken on the Versatile Express.
 */
static const struct pl111_variant_data pl111_vexpress = {
	.name = "PL111 Versatile Express",
	.formats = pl111_realview_pixel_formats,
	.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
	.fb_bpp = 16,
	.broken_clockdivider = true,
};

int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
	const struct of_device_id *clcd_id;
	enum versatile_clcd versatile_clcd_type;
	struct device_node *np;
	struct regmap *map;
	int ret;

	np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
					     &clcd_id);
@@ -301,7 +322,33 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
	}
	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;

	map = syscon_node_to_regmap(np);
	/* Versatile Express special handling */
	if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
		struct platform_device *pdev;

		/* Registers a driver for the muxfpga */
		ret = vexpress_muxfpga_init();
		if (ret) {
			dev_err(dev, "unable to initialize muxfpga driver\n");
			return ret;
		}

		/* Call into deep Vexpress configuration API */
		pdev = of_find_device_by_node(np);
		if (!pdev) {
			dev_err(dev, "can't find the sysreg device, deferring\n");
			return -EPROBE_DEFER;
		}
		map = dev_get_drvdata(&pdev->dev);
		if (!map) {
			dev_err(dev, "sysreg has not yet probed\n");
			platform_device_put(pdev);
			return -EPROBE_DEFER;
		}
	} else {
		map = syscon_node_to_regmap(np);
	}

	if (IS_ERR(map)) {
		dev_err(dev, "no Versatile syscon regmap\n");
		return PTR_ERR(map);
@@ -340,6 +387,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
		priv->variant_display_disable = pl111_realview_clcd_disable;
		dev_info(dev, "set up callbacks for RealView PL111\n");
		break;
	case VEXPRESS_CLCD_V2M:
		priv->variant = &pl111_vexpress;
		dev_info(dev, "initializing Versatile Express PL111\n");
		ret = pl111_vexpress_clcd_init(dev, priv, map);
		if (ret)
			return ret;
		break;
	default:
		dev_info(dev, "unknown Versatile system controller\n");
		break;

@@ -0,0 +1,134 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Versatile Express PL111 handling
 * Copyright (C) 2018 Linus Walleij
 *
 * This module binds to the "arm,vexpress-muxfpga" device on the
 * Versatile Express configuration bus and sets up which CLCD instance
 * gets muxed out on the DVI bridge.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/vexpress.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "pl111_drm.h"
#include "pl111_vexpress.h"

#define VEXPRESS_FPGAMUX_MOTHERBOARD		0x00
#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1	0x01
#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2	0x02

int pl111_vexpress_clcd_init(struct device *dev,
			     struct pl111_drm_dev_private *priv,
			     struct regmap *map)
{
	struct device_node *root;
	struct device_node *child;
	struct device_node *ct_clcd = NULL;
	bool has_coretile_clcd = false;
	bool has_coretile_hdlcd = false;
	bool mux_motherboard = true;
	u32 val;
	int ret;

	/*
	 * Check if we have a CLCD or HDLCD on the core tile by checking if a
	 * CLCD or HDLCD is available in the root of the device tree.
	 */
	root = of_find_node_by_path("/");
	if (!root)
		return -EINVAL;

	for_each_available_child_of_node(root, child) {
		if (of_device_is_compatible(child, "arm,pl111")) {
			has_coretile_clcd = true;
			ct_clcd = child;
			break;
		}
		if (of_device_is_compatible(child, "arm,hdlcd")) {
			has_coretile_hdlcd = true;
			break;
		}
	}

	/*
	 * If there is a coretile HDLCD and it has a driver,
	 * do not mux the CLCD on the motherboard to the DVI.
	 */
	if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
		mux_motherboard = false;

	/*
	 * On the Vexpress CA9 we let the CLCD on the coretile
	 * take precedence, so also in this case do not mux the
	 * motherboard to the DVI.
	 */
	if (has_coretile_clcd)
		mux_motherboard = false;

	if (mux_motherboard) {
		dev_info(dev, "DVI muxed to motherboard CLCD\n");
		val = VEXPRESS_FPGAMUX_MOTHERBOARD;
	} else if (ct_clcd == dev->of_node) {
		dev_info(dev,
			 "DVI muxed to daughterboard 1 (core tile) CLCD\n");
		val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
	} else {
		dev_info(dev, "core tile graphics present\n");
		dev_info(dev, "this device will be deactivated\n");
		return -ENODEV;
	}

	ret = regmap_write(map, 0, val);
	if (ret) {
		dev_err(dev, "error setting DVI muxmode\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This sets up the regmap pointer that will then be retrieved by
 * the detection code in pl111_versatile.c and passed in to the
 * pl111_vexpress_clcd_init() function above.
 */
static int vexpress_muxfpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap *map;

	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map))
		return PTR_ERR(map);
	dev_set_drvdata(dev, map);

	return 0;
}

static const struct of_device_id vexpress_muxfpga_match[] = {
	{ .compatible = "arm,vexpress-muxfpga", }
};

static struct platform_driver vexpress_muxfpga_driver = {
	.driver = {
		.name = "vexpress-muxfpga",
		.of_match_table = of_match_ptr(vexpress_muxfpga_match),
	},
	.probe = vexpress_muxfpga_probe,
};

int vexpress_muxfpga_init(void)
{
	int ret;

	ret = platform_driver_register(&vexpress_muxfpga_driver);
	/* -EBUSY just means this driver is already registered */
	if (ret == -EBUSY)
		ret = 0;
	return ret;
}

@@ -0,0 +1,29 @@
// SPDX-License-Identifier: GPL-2.0

struct device;
struct pl111_drm_dev_private;
struct regmap;

#ifdef CONFIG_ARCH_VEXPRESS

int pl111_vexpress_clcd_init(struct device *dev,
			     struct pl111_drm_dev_private *priv,
			     struct regmap *map);

int vexpress_muxfpga_init(void);

#else

static inline int pl111_vexpress_clcd_init(struct device *dev,
					   struct pl111_drm_dev_private *priv,
					   struct regmap *map)
{
	return -ENODEV;
}

static inline int vexpress_muxfpga_init(void)
{
	return 0;
}

#endif

@@ -339,12 +339,9 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;
	}

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
@@ -372,6 +369,7 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_created = false;
}

void qxl_io_create_primary(struct qxl_device *qdev,
@@ -397,6 +395,7 @@ void qxl_io_create_primary(struct qxl_device *qdev,
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_created = true;
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
@@ -405,20 +404,6 @@ void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * DO not do a DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
@@ -426,19 +411,6 @@ void qxl_io_reset(struct qxl_device *qdev)

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

@@ -48,12 +48,8 @@ static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned c
	qdev->client_monitors_config = kzalloc(
			sizeof(struct qxl_monitors_config) +
			sizeof(struct qxl_head) * count, GFP_KERNEL);
	if (!qdev->client_monitors_config) {
		qxl_io_log(qdev,
			   "%s: allocation failure for %u heads\n",
			   __func__, count);
	if (!qdev->client_monitors_config)
		return;
	}
	}
	qdev->client_monitors_config->count = count;
}
@@ -74,12 +70,8 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
	num_monitors = qdev->rom->client_monitors_config.count;
	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
		    sizeof(qdev->rom->client_monitors_config));
	if (crc != qdev->rom->client_monitors_config_crc) {
		qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
			   sizeof(qdev->rom->client_monitors_config),
			   qdev->rom->client_monitors_config_crc);
	if (crc != qdev->rom->client_monitors_config_crc)
		return MONITORS_CONFIG_BAD_CRC;
	}
	if (!num_monitors) {
		DRM_DEBUG_KMS("no client monitors configured\n");
		return status;
@@ -170,12 +162,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
		udelay(5);
	}
	if (status == MONITORS_CONFIG_BAD_CRC) {
		qxl_io_log(qdev, "config: bad crc\n");
		DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
		return;
	}
	if (status == MONITORS_CONFIG_UNCHANGED) {
		qxl_io_log(qdev, "config: unchanged\n");
		DRM_DEBUG_KMS("ignoring client monitors config: unchanged");
		return;
	}
@@ -268,6 +258,89 @@ static int qxl_add_common_modes(struct drm_connector *connector,
	return i - 1;
}

static void qxl_send_monitors_config(struct qxl_device *qdev)
{
	int i;

	BUG_ON(!qdev->ram_header->monitors_config);

	if (qdev->monitors_config->count == 0)
		return;

	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
		struct qxl_head *head = &qdev->monitors_config->heads[i];

		if (head->y > 8192 || head->x > 8192 ||
		    head->width > 8192 || head->height > 8192) {
			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
				  i, head->width, head->height,
				  head->x, head->y);
			return;
		}
	}
	qxl_io_monitors_config(qdev);
}

static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
					    const char *reason)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct qxl_head head;
	int oldcount, i = qcrtc->index;

	if (!qdev->primary_created) {
		DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
		return;
	}

	if (!qdev->monitors_config ||
	    qdev->monitors_config->max_allowed <= i)
		return;

	head.id = i;
	head.flags = 0;
	oldcount = qdev->monitors_config->count;
	if (crtc->state->active) {
		struct drm_display_mode *mode = &crtc->mode;
		head.width = mode->hdisplay;
		head.height = mode->vdisplay;
		head.x = crtc->x;
		head.y = crtc->y;
		if (qdev->monitors_config->count < i + 1)
			qdev->monitors_config->count = i + 1;
	} else if (i > 0) {
		head.width = 0;
		head.height = 0;
		head.x = 0;
		head.y = 0;
		if (qdev->monitors_config->count == i + 1)
			qdev->monitors_config->count = i;
	} else {
		DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason);
		return;
	}

	if (head.width == qdev->monitors_config->heads[i].width &&
	    head.height == qdev->monitors_config->heads[i].height &&
	    head.x == qdev->monitors_config->heads[i].x &&
	    head.y == qdev->monitors_config->heads[i].y &&
	    oldcount == qdev->monitors_config->count)
		return;

	DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n",
		      i, head.width, head.height, head.x, head.y,
		      crtc->state->active ? "on" : "off", reason);
	if (oldcount != qdev->monitors_config->count)
		DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
			      oldcount, qdev->monitors_config->count,
			      qdev->monitors_config->max_allowed);

	qdev->monitors_config->heads[i] = head;
	qxl_send_monitors_config(qdev);
}

static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
@@ -283,6 +356,8 @@ static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	qxl_crtc_update_monitors_config(crtc, "flush");
}

static void qxl_crtc_destroy(struct drm_crtc *crtc)
@@ -381,95 +456,19 @@ qxl_framebuffer_init(struct drm_device *dev,
	return 0;
}

static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;

	qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
		   __func__,
		   mode->hdisplay, mode->vdisplay,
		   adjusted_mode->hdisplay,
		   adjusted_mode->vdisplay);
	return true;
}

static void
qxl_send_monitors_config(struct qxl_device *qdev)
{
	int i;

	BUG_ON(!qdev->ram_header->monitors_config);

	if (qdev->monitors_config->count == 0) {
		qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
		return;
	}
	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
		struct qxl_head *head = &qdev->monitors_config->heads[i];

		if (head->y > 8192 || head->x > 8192 ||
		    head->width > 8192 || head->height > 8192) {
			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
				  i, head->width, head->height,
				  head->x, head->y);
			return;
		}
	}
	qxl_io_monitors_config(qdev);
}

static void qxl_monitors_config_set(struct qxl_device *qdev,
				    int index,
				    unsigned x, unsigned y,
				    unsigned width, unsigned height,
				    unsigned surf_id)
{
	DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y);
	qdev->monitors_config->heads[index].x = x;
	qdev->monitors_config->heads[index].y = y;
	qdev->monitors_config->heads[index].width = width;
	qdev->monitors_config->heads[index].height = height;
	qdev->monitors_config->heads[index].surface_id = surf_id;

}

static void qxl_mode_set_nofb(struct drm_crtc *crtc)
{
	struct qxl_device *qdev = crtc->dev->dev_private;
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct drm_display_mode *mode = &crtc->mode;

	DRM_DEBUG("Mode set (%d,%d)\n",
		  mode->hdisplay, mode->vdisplay);

	qxl_monitors_config_set(qdev, qcrtc->index, 0, 0,
				mode->hdisplay, mode->vdisplay, 0);

}

static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	DRM_DEBUG("\n");
	qxl_crtc_update_monitors_config(crtc, "enable");
}

static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct qxl_device *qdev = crtc->dev->dev_private;

	qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);

	qxl_send_monitors_config(qdev);
	qxl_crtc_update_monitors_config(crtc, "disable");
}

static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
	.mode_fixup = qxl_crtc_mode_fixup,
	.mode_set_nofb = qxl_mode_set_nofb,
	.atomic_flush = qxl_crtc_atomic_flush,
	.atomic_enable = qxl_crtc_atomic_enable,
	.atomic_disable = qxl_crtc_atomic_disable,
@@ -613,12 +612,6 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
	}
}

static int qxl_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	return 0;
}

static void qxl_cursor_atomic_update(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
@@ -824,7 +817,6 @@ static const uint32_t qxl_cursor_plane_formats[] = {
};

static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
	.atomic_check = qxl_plane_atomic_check,
	.atomic_update = qxl_cursor_atomic_update,
	.atomic_disable = qxl_cursor_atomic_disable,
	.prepare_fb = qxl_plane_prepare_fb,
@@ -949,81 +941,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
	return r;
}

static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
{
	DRM_DEBUG("\n");
}

static void qxl_enc_prepare(struct drm_encoder *encoder)
{
	DRM_DEBUG("\n");
}

static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
						  struct drm_encoder *encoder)
{
	int i;
	struct qxl_output *output = drm_encoder_to_qxl_output(encoder);
	struct qxl_head *head;
	struct drm_display_mode *mode;

	BUG_ON(!encoder);
	/* TODO: ugly, do better */
	i = output->index;
	if (!qdev->monitors_config ||
	    qdev->monitors_config->max_allowed <= i) {
		DRM_ERROR(
			"head number too large or missing monitors config: %p, %d",
			qdev->monitors_config,
			qdev->monitors_config ?
			qdev->monitors_config->max_allowed : -1);
		return;
	}
	if (!encoder->crtc) {
		DRM_ERROR("missing crtc on encoder %p\n", encoder);
		return;
	}
	if (i != 0)
		DRM_DEBUG("missing for multiple monitors: no head holes\n");
	head = &qdev->monitors_config->heads[i];
	head->id = i;
	if (encoder->crtc->enabled) {
		mode = &encoder->crtc->mode;
		head->width = mode->hdisplay;
		head->height = mode->vdisplay;
		head->x = encoder->crtc->x;
		head->y = encoder->crtc->y;
		if (qdev->monitors_config->count < i + 1)
			qdev->monitors_config->count = i + 1;
	} else {
		head->width = 0;
		head->height = 0;
		head->x = 0;
		head->y = 0;
	}
	DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n",
		      i, head->x, head->y, head->width, head->height, qdev->monitors_config->count);
	head->flags = 0;
	/* TODO - somewhere else to call this for multiple monitors
	 * (config_commit?) */
	qxl_send_monitors_config(qdev);
}

static void qxl_enc_commit(struct drm_encoder *encoder)
{
	struct qxl_device *qdev = encoder->dev->dev_private;

	qxl_write_monitors_config_for_encoder(qdev, encoder);
	DRM_DEBUG("\n");
}

static void qxl_enc_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG("\n");
}

static int qxl_conn_get_modes(struct drm_connector *connector)
{
	unsigned pwidth = 1024;
@@ -1069,10 +986,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)


static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
	.dpms = qxl_enc_dpms,
	.prepare = qxl_enc_prepare,
	.mode_set = qxl_enc_mode_set,
	.commit = qxl_enc_commit,
};

static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
@@ -1100,21 +1013,11 @@ static enum drm_connector_status qxl_conn_detect(
		qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);

	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
	if (!connected)
		qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);

	return connected ? connector_status_connected
			 : connector_status_disconnected;
}

static int qxl_conn_set_property(struct drm_connector *connector,
				 struct drm_property *property,
				 uint64_t value)
{
	DRM_DEBUG("\n");
	return 0;
}

static void qxl_conn_destroy(struct drm_connector *connector)
{
	struct qxl_output *qxl_output =
@@ -1129,7 +1032,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = qxl_conn_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = qxl_conn_set_property,
	.destroy = qxl_conn_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,

@@ -299,9 +299,6 @@ struct qxl_device {
	int monitors_config_height;
};

/* forward declaration for QXL_INFO_IO */
__printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);

extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;

@@ -185,8 +185,6 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
	/*
	 * we are using a shadow draw buffer, at qdev->surface0_shadow
	 */
	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
		   clips->y1, clips->y2);
	image->dx = clips->x1;
	image->dy = clips->y1;
	image->width = clips->x2 - clips->x1;

@@ -57,10 +57,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
		 * to avoid endless loops).
		 */
		qdev->irq_received_error++;
		qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
		DRM_WARN("driver is in bug mode\n");
	}
	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
		qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
		schedule_work(&qdev->client_monitors_config_work);
	}
	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;

@@ -105,16 +105,16 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int qxl_ttm_fault(struct vm_fault *vmf)
static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	r = ttm_vm_ops->fault(vmf);
	return r;
	ret = ttm_vm_ops->fault(vmf);
	return ret;
}

int qxl_mmap(struct file *filp, struct vm_area_struct *vma)

@@ -76,6 +76,9 @@
#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define VOP_WIN_TO_INDEX(vop_win) \
	((vop_win) - (vop_win)->vop->win)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

@@ -708,6 +711,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int win_index = VOP_WIN_TO_INDEX(vop_win);
	int format;

	/*
@@ -777,7 +781,14 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (fb->format->has_alpha) {
	/*
	 * Blending win0 with the background color doesn't seem to work
	 * correctly. We only get the background color, no matter the contents
	 * of the win0 framebuffer. However, blending pre-multiplied color
	 * with the default opaque black default background color is a no-op,
	 * so we can just disable blending to get the correct result.
	 */
	if (fb->format->has_alpha && win_index > 0) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |

@@ -1 +1 @@
obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
 *
 * The name is used as both an enum and expanded as igt__name to create
 * a module parameter. It must be unique and legal for a C identifier.
 *
 * Tests are executed in order by igt/drm_selftests_helper
 */
selftest(check_plane_state, igt_check_plane_state)

@@ -0,0 +1,247 @@
/*
 * Test cases for the drm_kms_helper functions
 */

#define pr_fmt(fmt) "drm_kms_helper: " fmt

#include <linux/module.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>

#define TESTS "drm_helper_selftests.h"
#include "drm_selftest.h"

#define FAIL(test, msg, ...) \
	do { \
		if (test) { \
			pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
			return -EINVAL; \
		} \
	} while (0)

#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")

static void set_src(struct drm_plane_state *plane_state,
		    unsigned src_x, unsigned src_y,
		    unsigned src_w, unsigned src_h)
{
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
	plane_state->src_w = src_w;
	plane_state->src_h = src_h;
}

static bool check_src_eq(struct drm_plane_state *plane_state,
			 unsigned src_x, unsigned src_y,
			 unsigned src_w, unsigned src_h)
{
	if (plane_state->src.x1 < 0) {
		pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
		drm_rect_debug_print("src: ", &plane_state->src, true);
		return false;
	}
	if (plane_state->src.y1 < 0) {
		pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1);
		drm_rect_debug_print("src: ", &plane_state->src, true);
		return false;
	}

	if (plane_state->src.x1 != src_x ||
	    plane_state->src.y1 != src_y ||
	    drm_rect_width(&plane_state->src) != src_w ||
	    drm_rect_height(&plane_state->src) != src_h) {
		drm_rect_debug_print("src: ", &plane_state->src, true);
		return false;
	}

	return true;
}

static void set_crtc(struct drm_plane_state *plane_state,
		     int crtc_x, int crtc_y,
		     unsigned crtc_w, unsigned crtc_h)
{
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
	plane_state->crtc_w = crtc_w;
	plane_state->crtc_h = crtc_h;
}

static bool check_crtc_eq(struct drm_plane_state *plane_state,
			  int crtc_x, int crtc_y,
			  unsigned crtc_w, unsigned crtc_h)
{
	if (plane_state->dst.x1 != crtc_x ||
	    plane_state->dst.y1 != crtc_y ||
	    drm_rect_width(&plane_state->dst) != crtc_w ||
	    drm_rect_height(&plane_state->dst) != crtc_h) {
		drm_rect_debug_print("dst: ", &plane_state->dst, false);

		return false;
	}

	return true;
}

static int igt_check_plane_state(void *ignored)
{
	int ret;

	const struct drm_crtc_state crtc_state = {
		.crtc = ZERO_SIZE_PTR,
		.enable = true,
		.active = true,
		.mode = {
			DRM_MODE("1024x768", 0, 65000, 1024, 1048,
				 1184, 1344, 0, 768, 771, 777, 806, 0,
				 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
		},
	};
	struct drm_framebuffer fb = {
		.width = 2048,
		.height = 2048
	};
	struct drm_plane_state plane_state = {
		.crtc = ZERO_SIZE_PTR,
		.fb = &fb,
		.rotation = DRM_MODE_ROTATE_0
	};

	/* Simple clipping, no scaling. */
	set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
	set_crtc(&plane_state, 0, 0, fb.width, fb.height);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(ret < 0, "Simple clipping check should pass\n");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));

	/* Rotated clipping + reflection, no scaling. */
	plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(ret < 0, "Rotated clipping check should pass\n");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
	plane_state.rotation = DRM_MODE_ROTATE_0;

	/* Check whether positioning works correctly. */
	set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
	set_crtc(&plane_state, 0, 0, 1023, 767);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n");

	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, false);
	FAIL(ret < 0, "Simple positioning should work\n");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767));

	/* Simple scaling tests. */
	set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
	set_crtc(&plane_state, 0, 0, 1024, 768);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  0x8001,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(!ret, "Upscaling out of range should fail.\n");
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  0x8000,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(ret < 0, "Upscaling exactly 2x should work\n");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));

	set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  0x1ffff, false, false);
	FAIL(!ret, "Downscaling out of range should fail.\n");
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  0x20000, false, false);
	FAIL(ret < 0, "Should succeed with exact scaling limit\n");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));

	/* Testing rounding errors. */
	set_src(&plane_state, 0, 0, 0x40001, 0x40001);
	set_crtc(&plane_state, 1022, 766, 4, 4);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  0x10001,
						  true, false);
	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));

	set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
	set_crtc(&plane_state, -2, -2, 1028, 772);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  0x10001,
						  false, false);
	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));

	set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
	set_crtc(&plane_state, 1022, 766, 4, 4);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  0xffff,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, false);
	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
	FAIL_ON(!plane_state.visible);
	/* Should not be rounded to 0x20001, which would be upscaling. */
	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));

	set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
	set_crtc(&plane_state, -2, -2, 1028, 772);
	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
						  0xffff,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, false);
	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
	FAIL_ON(!plane_state.visible);
	FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16));
	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));

	return 0;
}

#include "drm_selftest.c"

static int __init test_drm_helper_init(void)
{
	int err;

	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);

	return err > 0 ? 0 : err;
}

module_init(test_drm_helper_init);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");

@@ -357,7 +357,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&sti_crtc_funcs, NULL);
	if (res) {
		DRM_ERROR("Can't initialze CRTC\n");
		DRM_ERROR("Can't initialize CRTC\n");
		return -EINVAL;
	}

@@ -445,6 +445,43 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
	reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
}

#define CLK_TOLERANCE_HZ 50

static enum drm_mode_status
ltdc_crtc_mode_valid(struct drm_crtc *crtc,
		     const struct drm_display_mode *mode)
{
	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
	int target = mode->clock * 1000;
	int target_min = target - CLK_TOLERANCE_HZ;
	int target_max = target + CLK_TOLERANCE_HZ;
	int result;

	/*
	 * Accept all "preferred" modes:
	 * - this is important for panels because panel clock tolerances are
	 *   bigger than hdmi ones and there is no reason to not accept them
	 *   (the fps may vary a little but it is not a problem).
	 * - the hdmi preferred mode will be accepted too, but userland will
	 *   be able to use others hdmi "valid" modes if necessary.
	 */
	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		return MODE_OK;

	result = clk_round_rate(ldev->pixel_clk, target);

	DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);

	/*
	 * Filter modes according to the clock value, particularly useful for
	 * hdmi modes that require precise pixel clocks.
	 */
	if (result < target_min || result > target_max)
		return MODE_CLOCK_RANGE;

	return MODE_OK;
}

static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
@@ -559,6 +596,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}

static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
	.mode_valid = ltdc_crtc_mode_valid,
	.mode_fixup = ltdc_crtc_mode_fixup,
	.mode_set_nofb = ltdc_crtc_mode_set_nofb,
	.atomic_flush = ltdc_crtc_atomic_flush,
@@ -822,13 +860,13 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,

	plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return 0;
		return NULL;

	ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
				       &ltdc_plane_funcs, formats, nb_fmt,
				       NULL, type, NULL);
	if (ret < 0)
		return 0;
		return NULL;

	drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);

@@ -987,14 +1025,13 @@ int ltdc_load(struct drm_device *ddev)
					  &bridge[i]);

		/*
		 * If at least one endpoint is ready, continue probing,
		 * else if at least one endpoint is -EPROBE_DEFER and
		 * there is no previous ready endpoints, defer probing.
		 * If at least one endpoint is -EPROBE_DEFER, defer probing,
		 * else if at least one endpoint is ready, continue probing.
		 */
		if (!ret)
		if (ret == -EPROBE_DEFER)
			return ret;
		else if (!ret)
			endpoint_not_ready = 0;
		else if (ret == -EPROBE_DEFER && endpoint_not_ready)
			endpoint_not_ready = -EPROBE_DEFER;
	}

	if (endpoint_not_ready)

@@ -85,24 +85,6 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
	/* Memory Access Control */
	mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);

	switch (mipi->rotation) {
	default:
		addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
			    ILI9341_MADCTL_MX;
		break;
	case 90:
		addr_mode = ILI9341_MADCTL_MY;
		break;
	case 180:
		addr_mode = ILI9341_MADCTL_MV;
		break;
	case 270:
		addr_mode = ILI9341_MADCTL_MX;
		break;
	}
	addr_mode |= ILI9341_MADCTL_BGR;
	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);

	/* Frame Rate */
	mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);

@@ -128,6 +110,29 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
	msleep(100);

out_enable:
	/* The PiTFT (ili9340) has a hardware reset circuit that
	 * resets only on power-on and not on each reboot through
	 * a gpio like the rpi-display does.
	 * As a result, we need to always apply the rotation value
	 * regardless of the display "on/off" state.
	 */
	switch (mipi->rotation) {
	default:
		addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
			    ILI9341_MADCTL_MX;
		break;
	case 90:
		addr_mode = ILI9341_MADCTL_MY;
		break;
	case 180:
		addr_mode = ILI9341_MADCTL_MV;
		break;
	case 270:
		addr_mode = ILI9341_MADCTL_MX;
		break;
	}
	addr_mode |= ILI9341_MADCTL_BGR;
	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
}

@@ -0,0 +1,9 @@
config DRM_V3D
	tristate "Broadcom V3D 3.x and newer"
	depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
	depends on DRM
	depends on COMMON_CLK
	select DRM_SCHED
	help
	  Choose this option if you have a system that has a Broadcom
	  V3D 3.x or newer GPU, such as BCM7268.

@@ -0,0 +1,18 @@
# Please keep these build lists sorted!

# core driver code
v3d-y := \
	v3d_bo.o \
	v3d_drv.o \
	v3d_fence.o \
	v3d_gem.o \
	v3d_irq.o \
	v3d_mmu.o \
	v3d_trace_points.o \
	v3d_sched.o

v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o

obj-$(CONFIG_DRM_V3D) += v3d.o

CFLAGS_v3d_trace_points.o := -I$(src)

@ -0,0 +1,389 @@
|
|||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/* Copyright (C) 2015-2018 Broadcom */
|
||||
|
||||
/**
|
||||
* DOC: V3D GEM BO management support
|
||||
*
|
||||
* Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
|
||||
* GPU and the bus, allowing us to use shmem objects for our storage
|
||||
* instead of CMA.
|
||||
*
|
||||
* Physically contiguous objects may still be imported to V3D, but the
|
||||
* driver doesn't allocate physically contiguous objects on its own.
|
||||
* Display engines requiring physically contiguous allocations should
|
||||
* look into Mesa's "renderonly" support (as used by the Mesa pl111
|
||||
* driver) for an example of how to integrate with V3D.
|
||||
*
|
||||
* Long term, we should support evicting pages from the MMU when under
|
||||
* memory pressure (thus the v3d_bo_get_pages() refcounting), but
|
||||
* that's not a high priority since our systems tend to not have swap.
|
||||
*/
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/pfn_t.h>
|
||||
|
||||
#include "v3d_drv.h"
|
||||
#include "uapi/drm/v3d_drm.h"
|
||||
|
||||
/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
|
||||
* it for DMA.
|
||||
*/
|
||||
static int
|
||||
v3d_bo_get_pages(struct v3d_bo *bo)
|
||||
{
|
||||
struct drm_gem_object *obj = &bo->base;
|
||||
struct drm_device *dev = obj->dev;
|
||||
int npages = obj->size >> PAGE_SHIFT;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&bo->lock);
|
||||
if (bo->pages_refcount++ != 0)
|
||||
goto unlock;
|
||||
|
||||
if (!obj->import_attach) {
|
||||
bo->pages = drm_gem_get_pages(obj);
|
||||
if (IS_ERR(bo->pages)) {
|
||||
ret = PTR_ERR(bo->pages);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
|
||||
if (IS_ERR(bo->sgt)) {
|
||||
ret = PTR_ERR(bo->sgt);
|
||||
goto put_pages;
|
||||
}
|
||||
|
||||
/* Map the pages for use by the GPU. */
|
||||
dma_map_sg(dev->dev, bo->sgt->sgl,
|
||||
bo->sgt->nents, DMA_BIDIRECTIONAL);
|
||||
} else {
|
||||
bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
|
||||
if (!bo->pages)
|
||||
goto put_pages;
|
||||

		drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
						 NULL, npages);

		/* Note that dma-bufs come in mapped. */
	}

	mutex_unlock(&bo->lock);

	return 0;

put_pages:
	drm_gem_put_pages(obj, bo->pages, true, true);
	bo->pages = NULL;
unlock:
	bo->pages_refcount--;
	mutex_unlock(&bo->lock);
	return ret;
}

static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
	struct drm_gem_object *obj = &bo->base;

	mutex_lock(&bo->lock);
	if (--bo->pages_refcount == 0) {
		if (!obj->import_attach) {
			dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
				     bo->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(bo->sgt);
			kfree(bo->sgt);
			drm_gem_put_pages(obj, bo->pages, true, true);
		} else {
			kfree(bo->pages);
		}
	}
	mutex_unlock(&bo->lock);
}

static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
					   size_t unaligned_size)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_gem_object *obj;
	struct v3d_bo *bo;
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	int ret;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);
	obj = &bo->base;

	INIT_LIST_HEAD(&bo->vmas);
	INIT_LIST_HEAD(&bo->unref_head);
	mutex_init(&bo->lock);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto free_bo;

	spin_lock(&v3d->mm_lock);
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> PAGE_SHIFT,
					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	drm_gem_object_release(obj);
free_bo:
	kfree(bo);
	return ERR_PTR(ret);
}

struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_gem_object *obj;
	struct v3d_bo *bo;
	int ret;

	bo = v3d_bo_create_struct(dev, unaligned_size);
	if (IS_ERR(bo))
		return bo;
	obj = &bo->base;

	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	ret = v3d_bo_get_pages(bo);
	if (ret)
		goto free_mm;

	v3d_mmu_insert_ptes(bo);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	return bo;

free_mm:
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	drm_gem_object_release(obj);
	kfree(bo);
	return ERR_PTR(ret);
}

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	reservation_object_fini(&bo->_resv);

	v3d_bo_put_pages(bo);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);

	v3d_mmu_remove_ptes(bo);
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	mutex_destroy(&bo->lock);

	drm_gem_object_release(obj);
	kfree(bo);
}

struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
{
	struct v3d_bo *bo = to_v3d_bo(obj);

	return bo->resv;
}

static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}

int v3d_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct v3d_bo *bo = to_v3d_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(bo->pages[pgoff]);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	v3d_set_mmap_vma_flags(vma);

	return ret;
}

int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	v3d_set_mmap_vma_flags(vma);

	return 0;
}

struct sg_table *
v3d_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct v3d_bo *bo = to_v3d_bo(obj);
	int npages = obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(bo->pages, npages);
}

struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct v3d_bo *bo;

	bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
	if (IS_ERR(bo))
		return ERR_CAST(bo);
	obj = &bo->base;

	bo->resv = attach->dmabuf->resv;

	bo->sgt = sgt;
	v3d_bo_get_pages(bo);

	v3d_mmu_insert_ptes(bo);

	return obj;
}

int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_v3d_create_bo *args = data;
	struct v3d_bo *bo = NULL;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	args->offset = bo->node.start << PAGE_SHIFT;

	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base);

	return ret;
}

int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_v3d_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}

int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_v3d_get_bo_offset *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_v3d_bo(gem_obj);

	args->offset = bo->node.start << PAGE_SHIFT;

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}
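To sketch how userspace is expected to drive the BO ioctls above: CREATE_BO returns both a GEM handle and the BO's fixed offset in the GPU address space, while the CPU mmap offset comes from the separate MMAP_BO ioctl. A hypothetical (unverified) userspace helper, assuming the DRM_IOCTL_V3D_* request macros and struct layouts from the new include/uapi/drm/v3d_drm.h header, which is not part of this hunk:

	/* Hypothetical userspace sketch, not part of this commit. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "drm/v3d_drm.h"

	static void *v3d_create_and_map(int fd, uint32_t size, uint32_t *handle)
	{
		struct drm_v3d_create_bo create = { .size = size };
		struct drm_v3d_mmap_bo map = { 0 };
		void *ptr;

		if (ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create) != 0)
			return NULL;
		*handle = create.handle;

		/* The fake mmap offset is handed out by a second ioctl. */
		map.handle = create.handle;
		if (ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map) != 0)
			return NULL;

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, map.offset);
		return ptr == MAP_FAILED ? NULL : ptr;
	}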
drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -0,0 +1,191 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <drm/drmP.h>

#include "v3d_drv.h"
#include "v3d_regs.h"

#define REGDEF(reg) { reg, #reg }
struct v3d_reg_def {
	u32 reg;
	const char *name;
};

static const struct v3d_reg_def v3d_hub_reg_defs[] = {
	REGDEF(V3D_HUB_AXICFG),
	REGDEF(V3D_HUB_UIFCFG),
	REGDEF(V3D_HUB_IDENT0),
	REGDEF(V3D_HUB_IDENT1),
	REGDEF(V3D_HUB_IDENT2),
	REGDEF(V3D_HUB_IDENT3),
	REGDEF(V3D_HUB_INT_STS),
	REGDEF(V3D_HUB_INT_MSK_STS),
};

static const struct v3d_reg_def v3d_gca_reg_defs[] = {
	REGDEF(V3D_GCA_SAFE_SHUTDOWN),
	REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK),
};

static const struct v3d_reg_def v3d_core_reg_defs[] = {
	REGDEF(V3D_CTL_IDENT0),
	REGDEF(V3D_CTL_IDENT1),
	REGDEF(V3D_CTL_IDENT2),
	REGDEF(V3D_CTL_MISCCFG),
	REGDEF(V3D_CTL_INT_STS),
	REGDEF(V3D_CTL_INT_MSK_STS),
	REGDEF(V3D_CLE_CT0CS),
	REGDEF(V3D_CLE_CT0CA),
	REGDEF(V3D_CLE_CT0EA),
	REGDEF(V3D_CLE_CT1CS),
	REGDEF(V3D_CLE_CT1CA),
	REGDEF(V3D_CLE_CT1EA),

	REGDEF(V3D_PTB_BPCA),
	REGDEF(V3D_PTB_BPCS),

	REGDEF(V3D_MMU_CTL),
	REGDEF(V3D_MMU_VIO_ADDR),

	REGDEF(V3D_GMP_STATUS),
	REGDEF(V3D_GMP_CFG),
	REGDEF(V3D_GMP_VIO_ADDR),
};

static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct v3d_dev *v3d = to_v3d_dev(dev);
	int i, core;

	for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) {
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg,
			   V3D_READ(v3d_hub_reg_defs[i].reg));
	}

	for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
			   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
	}

	for (core = 0; core < v3d->cores; core++) {
		for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) {
			seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
				   core,
				   v3d_core_reg_defs[i].name,
				   v3d_core_reg_defs[i].reg,
				   V3D_CORE_READ(core,
						 v3d_core_reg_defs[i].reg));
		}
	}

	return 0;
}

static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 ident0, ident1, ident2, ident3, cores;
	int ret, core;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0)
		return ret;

	ident0 = V3D_READ(V3D_HUB_IDENT0);
	ident1 = V3D_READ(V3D_HUB_IDENT1);
	ident2 = V3D_READ(V3D_HUB_IDENT2);
	ident3 = V3D_READ(V3D_HUB_IDENT3);
	cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);

	seq_printf(m, "Revision: %d.%d.%d.%d\n",
		   V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER),
		   V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV),
		   V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV),
		   V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX));
	seq_printf(m, "MMU: %s\n",
		   (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no");
	seq_printf(m, "TFU: %s\n",
		   (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no");
	seq_printf(m, "TSY: %s\n",
		   (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no");
	seq_printf(m, "MSO: %s\n",
		   (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no");
	seq_printf(m, "L3C: %s (%dkb)\n",
		   (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? "yes" : "no",
		   V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB));

	for (core = 0; core < cores; core++) {
		u32 misccfg;
		u32 nslc, ntmu, qups;

		ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0);
		ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1);
		ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2);
		misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG);

		nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC);
		ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU);
		qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS);

		seq_printf(m, "Core %d:\n", core);
		seq_printf(m, "  Revision:     %d.%d\n",
			   V3D_GET_FIELD(ident0, V3D_IDENT0_VER),
			   V3D_GET_FIELD(ident1, V3D_IDENT1_REV));
		seq_printf(m, "  Slices:       %d\n", nslc);
		seq_printf(m, "  TMUs:         %d\n", nslc * ntmu);
		seq_printf(m, "  QPUs:         %d\n", nslc * qups);
		seq_printf(m, "  Semaphores:   %d\n",
			   V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
		seq_printf(m, "  BCG int:      %d\n",
			   (ident2 & V3D_IDENT2_BCG_INT) != 0);
		seq_printf(m, "  Override TMU: %d\n",
			   (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	return 0;
}

static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct v3d_dev *v3d = to_v3d_dev(dev);

	mutex_lock(&v3d->bo_lock);
	seq_printf(m, "allocated bos:          %d\n",
		   v3d->bo_stats.num_allocated);
	seq_printf(m, "allocated bo size (kb): %ld\n",
		   (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10));
	mutex_unlock(&v3d->bo_lock);

	return 0;
}

static const struct drm_info_list v3d_debugfs_list[] = {
	{"v3d_ident", v3d_v3d_debugfs_ident, 0},
	{"v3d_regs", v3d_v3d_debugfs_regs, 0},
	{"bo_stats", v3d_debugfs_bo_stats, 0},
};

int
v3d_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(v3d_debugfs_list,
					ARRAY_SIZE(v3d_debugfs_list),
					minor->debugfs_root, minor);
}
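Once the driver is bound, these three entries show up under the device's DRM debugfs directory; with debugfs mounted in the usual place, something like the following (the minor number depends on the system):

	cat /sys/kernel/debug/dri/0/v3d_ident
	cat /sys/kernel/debug/dri/0/bo_stats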
drivers/gpu/drm/v3d/v3d_drv.c
@@ -0,0 +1,371 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Broadcom V3D Graphics Driver
 *
 * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
 * For V3D 2.x support, see the VC4 driver.
 *
 * Currently only single-core rendering using the binner and renderer
 * is supported.  The TFU (texture formatting unit) and V3D 4.x's CSD
 * (compute shader dispatch) are not yet supported.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"

#define DRIVER_NAME "v3d"
#define DRIVER_DESC "Broadcom V3D graphics"
#define DRIVER_DATE "20180419"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#ifdef CONFIG_PM
static int v3d_runtime_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct v3d_dev *v3d = to_v3d_dev(drm);

	v3d_irq_disable(v3d);

	clk_disable_unprepare(v3d->clk);

	return 0;
}

static int v3d_runtime_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct v3d_dev *v3d = to_v3d_dev(drm);
	int ret;

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	/* XXX: VPM base */

	v3d_mmu_set_page_table(v3d);
	v3d_irq_enable(v3d);

	return 0;
}
#endif

static const struct dev_pm_ops v3d_v3d_pm_ops = {
	SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL)
};

static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_get_param *args = data;
	int ret;
	static const u32 reg_map[] = {
		[DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG,
		[DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1,
		[DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2,
		[DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3,
		[DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0,
		[DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1,
		[DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2,
	};

	if (args->pad != 0)
		return -EINVAL;

	/* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need
	 * to explicitly allow it in the "the register in our
	 * parameter map" check.
	 */
	if (args->param < ARRAY_SIZE(reg_map) &&
	    (reg_map[args->param] ||
	     args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) {
		u32 offset = reg_map[args->param];

		if (args->value != 0)
			return -EINVAL;

		ret = pm_runtime_get_sync(v3d->dev);
		if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
		    args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
			args->value = V3D_CORE_READ(0, offset);
		} else {
			args->value = V3D_READ(offset);
		}
		pm_runtime_mark_last_busy(v3d->dev);
		pm_runtime_put_autosuspend(v3d->dev);
		return 0;
	}

	/* Any params that aren't just register reads would go here. */

	DRM_DEBUG("Unknown parameter %d\n", args->param);
	return -EINVAL;
}

static int
v3d_open(struct drm_device *dev, struct drm_file *file)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv;
	int i;

	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
	if (!v3d_priv)
		return -ENOMEM;

	v3d_priv->v3d = v3d;

	for (i = 0; i < V3D_MAX_QUEUES; i++) {
		drm_sched_entity_init(&v3d->queue[i].sched,
				      &v3d_priv->sched_entity[i],
				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				      32, NULL);
	}

	file->driver_priv = v3d_priv;

	return 0;
}

static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file->driver_priv;
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_entity_fini(&v3d->queue[q].sched,
				      &v3d_priv->sched_entity[q]);
	}

	kfree(v3d_priv);
}

static const struct file_operations v3d_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = v3d_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
 * protection between clients.  Note that render nodes would be
 * able to submit CLs that could access BOs from clients authenticated
 * with the master node.
 */
static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct v3d_vm_ops = {
	.fault = v3d_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver v3d_drm_driver = {
	.driver_features = (DRIVER_GEM |
			    DRIVER_RENDER |
			    DRIVER_PRIME |
			    DRIVER_SYNCOBJ),

	.open = v3d_open,
	.postclose = v3d_postclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = v3d_debugfs_init,
#endif

	.gem_free_object_unlocked = v3d_free_object,
	.gem_vm_ops = &v3d_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_res_obj = v3d_prime_res_obj,
	.gem_prime_get_sg_table = v3d_prime_get_sg_table,
	.gem_prime_import_sg_table = v3d_prime_import_sg_table,
	.gem_prime_mmap = v3d_prime_mmap,

	.ioctls = v3d_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
	.fops = &v3d_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static const struct of_device_id v3d_of_match[] = {
	{ .compatible = "brcm,7268-v3d" },
	{ .compatible = "brcm,7278-v3d" },
	{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);

static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
	struct resource *res =
		platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);

	*regs = devm_ioremap_resource(v3d->dev, res);
	return PTR_ERR_OR_ZERO(*regs);
}

static int v3d_platform_drm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct drm_device *drm;
	struct v3d_dev *v3d;
	int ret;
	u32 ident1;

	dev->coherent_dma_mask = DMA_BIT_MASK(36);

	v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
	if (!v3d)
		return -ENOMEM;
	v3d->dev = dev;
	v3d->pdev = pdev;
	drm = &v3d->drm;

	ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
	if (ret)
		goto dev_free;

	ret = map_regs(v3d, &v3d->hub_regs, "hub");
	if (ret)
		goto dev_free;

	ret = map_regs(v3d, &v3d->core_regs[0], "core0");
	if (ret)
		goto dev_free;

	ident1 = V3D_READ(V3D_HUB_IDENT1);
	v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
		    V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
	v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
	WARN_ON(v3d->cores > 1); /* multicore not yet implemented */

	if (v3d->ver < 41) {
		ret = map_regs(v3d, &v3d->gca_regs, "gca");
		if (ret)
			goto dev_free;
	}

	v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->mmu_scratch) {
		dev_err(dev, "Failed to allocate MMU scratch page\n");
		ret = -ENOMEM;
		goto dev_free;
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_enable(dev);

	ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
	if (ret)
		goto dma_free;

	platform_set_drvdata(pdev, drm);
	drm->dev_private = v3d;

	ret = v3d_gem_init(drm);
	if (ret)
		goto dev_destroy;

	v3d_irq_init(v3d);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto gem_destroy;

	return 0;

gem_destroy:
	v3d_gem_destroy(drm);
dev_destroy:
	drm_dev_put(drm);
dma_free:
	dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
dev_free:
	kfree(v3d);
	return ret;
}

static int v3d_platform_drm_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);
	struct v3d_dev *v3d = to_v3d_dev(drm);

	drm_dev_unregister(drm);

	v3d_gem_destroy(drm);

	drm_dev_put(drm);

	dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);

	return 0;
}

static struct platform_driver v3d_platform_driver = {
	.probe = v3d_platform_drm_probe,
	.remove = v3d_platform_drm_remove,
	.driver = {
		.name = "v3d",
		.of_match_table = v3d_of_match,
	},
};

static int __init v3d_drm_register(void)
{
	return platform_driver_register(&v3d_platform_driver);
}

static void __exit v3d_drm_unregister(void)
{
	platform_driver_unregister(&v3d_platform_driver);
}

module_init(v3d_drm_register);
module_exit(v3d_drm_unregister);

MODULE_ALIAS("platform:v3d-drm");
MODULE_DESCRIPTION("Broadcom V3D DRM Driver");
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_LICENSE("GPL v2");
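Seen from the other side, the GET_PARAM path above is a plain register-read service. A hypothetical userspace probe, again assuming the DRM_IOCTL_V3D_GET_PARAM request macro from the uapi header (not shown in this hunk); note the ioctl rejects non-zero pad and value fields on input:

	/* Hypothetical userspace sketch, not part of this commit. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "drm/v3d_drm.h"

	static int v3d_get_param(int fd, uint32_t param, uint64_t *value)
	{
		/* pad and value stay zero-initialized, as required. */
		struct drm_v3d_get_param args = { .param = param };
		int ret = ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &args);

		if (ret == 0)
			*value = args.value;
		return ret;
	}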
drivers/gpu/drm/v3d/v3d_drv.h
@@ -0,0 +1,294 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues.  We maintain various queue
 * tracking as an array because at some point we'll want to support
 * the TFU (texture formatting unit) as another queue.
 */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
};

#define V3D_MAX_QUEUES (V3D_RENDER + 1)

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
	u64 finished_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;

	struct device *dev;
	struct platform_device *pdev;
	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page.  When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space.  All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_exec_info *bin_job;
	struct v3d_exec_info *render_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return (struct v3d_dev *)dev->dev_private;
}

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};

/* Tracks a mapping of a BO into a per-fd address space */
struct v3d_vma {
	struct v3d_page_table *pt;
	struct list_head list; /* entry in v3d_bo.vmas */
};

struct v3d_bo {
	struct drm_gem_object base;

	struct mutex lock;

	struct drm_mm_node node;

	u32 pages_refcount;
	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of v3d_vma */

	/* List entry for the BO's position in
	 * v3d_exec_info->unref_list
	 */
	struct list_head unref_head;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)

struct v3d_job {
	struct drm_sched_job base;

	struct v3d_exec_info *exec;

	/* An optional fence userspace can pass in for the job to depend on. */
	struct dma_fence *in_fence;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *done_fence;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;
};

struct v3d_exec_info {
	struct v3d_dev *v3d;

	struct v3d_job bin, render;

	/* Fence for when the scheduler considers the binner to be
	 * done, for render to depend on.
	 */
	struct dma_fence *bin_done_fence;

	struct kref refcount;

	/* This is the array of BOs that were looked up at the start of exec. */
	struct v3d_bo **bo;
	u32 bo_count;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

/**
 * wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts.  Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/* v3d_bo.c */
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
int v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_flush_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
void v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
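To make the address-space units above concrete: the drm_mm allocator counts pages, so with 4 KiB pages the 128 KiB GMP_GRANULARITY used as the allocation alignment in v3d_bo_create_struct() works out to (128 * 1024) >> 12 = 32 pages, and the 4 MiB page table allocated in v3d_gem_init() holds 4 MiB / 4 bytes per u32 entry = 1M entries, i.e. 1M pages or 4 GiB of GPU virtual address space.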
drivers/gpu/drm/v3d/v3d_fence.c
@@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

#include "v3d_drv.h"

struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
{
	struct v3d_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = &v3d->drm;
	fence->queue = queue;
	fence->seqno = ++v3d->queue[queue].emit_seqno;
	dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
		       v3d->queue[queue].fence_context, fence->seqno);

	return &fence->base;
}

static const char *v3d_fence_get_driver_name(struct dma_fence *fence)
{
	return "v3d";
}

static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
{
	struct v3d_fence *f = to_v3d_fence(fence);

	if (f->queue == V3D_BIN)
		return "v3d-bin";
	else
		return "v3d-render";
}

static bool v3d_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool v3d_fence_signaled(struct dma_fence *fence)
{
	struct v3d_fence *f = to_v3d_fence(fence);
	struct v3d_dev *v3d = to_v3d_dev(f->dev);

	return v3d->queue[f->queue].finished_seqno >= f->seqno;
}

const struct dma_fence_ops v3d_fence_ops = {
	.get_driver_name = v3d_fence_get_driver_name,
	.get_timeline_name = v3d_fence_get_timeline_name,
	.enable_signaling = v3d_fence_enable_signaling,
	.signaled = v3d_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = dma_fence_free,
};
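The invariant behind these ops is simple: emit_seqno increases at fence creation and finished_seqno chases it from the interrupt handler, so .signaled is just a seqno comparison. Anything in the kernel can then wait on these fences through the generic dma_fence API; a minimal sketch:

	/* Sketch: wait up to 100ms, interruptibly, on a v3d fence. */
	long t = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	if (t == 0)
		DRM_DEBUG("fence wait timed out\n");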
drivers/gpu/drm/v3d/v3d_gem.c
@@ -0,0 +1,668 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_ERROR("Resetting GPU.\n");
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2 cache. */
static void
v3d_invalidate_l2(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

static void
v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
	}
}

/* Flushes texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	v3d_invalidate_l1td(v3d, core);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T flush\n");
	}
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Invalidates texture L2 cachelines */
static void
v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core,
		       V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T invalidate\n");
	}
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	v3d_flush_l3(v3d);

	v3d_invalidate_l2(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
	v3d_flush_l2t(v3d, 0);
}

void
v3d_flush_caches(struct v3d_dev *v3d)
{
	v3d_invalidate_l1td(v3d, 0);
	v3d_invalidate_l2t(v3d, 0);
}

static void
v3d_attach_object_fences(struct v3d_exec_info *exec)
{
	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
	struct v3d_bo *bo;
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		/* XXX: Use shared fences for read-only objects. */
		reservation_object_add_excl_fence(bo->resv, out_fence);
	}
}

static void
v3d_unlock_bo_reservations(struct drm_device *dev,
			   struct v3d_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct drm_device *dev,
			 struct v3d_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct v3d_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_v3d_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_v3d_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_v3d_submit_cl *args,
		  struct v3d_exec_info *exec)
{
	u32 *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static void
v3d_exec_cleanup(struct kref *ref)
{
	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
						  refcount);
	struct v3d_dev *v3d = exec->v3d;
	unsigned int i;
	struct v3d_bo *bo, *save;

	dma_fence_put(exec->bin.in_fence);
	dma_fence_put(exec->render.in_fence);

	dma_fence_put(exec->bin.done_fence);
	dma_fence_put(exec->render.done_fence);

	dma_fence_put(exec->bin_done_fence);

	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);
	kvfree(exec->bo);

	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(exec);
}

void v3d_exec_put(struct v3d_exec_info *exec)
{
	kref_put(&exec->refcount, v3d_exec_cleanup);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_v3d_bo(gem_obj);

	ret = reservation_object_wait_timeout_rcu(bo->resv,
						  true, true,
						  timeout_jiffies);

	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	int ret = 0;

	if (args->pad != 0) {
		DRM_INFO("pad must be zero: %d\n", args->pad);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	kref_init(&exec->refcount);

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
				     &exec->bin.in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
				     &exec->render.in_fence);
	if (ret == -EINVAL)
		goto fail;

	exec->qma = args->qma;
	exec->qms = args->qms;
	exec->qts = args->qts;
	exec->bin.exec = exec;
	exec->bin.start = args->bcl_start;
	exec->bin.end = args->bcl_end;
	exec->render.exec = exec;
	exec->render.start = args->rcl_start;
	exec->render.end = args->rcl_end;
	exec->v3d = v3d;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (exec->bin.start != exec->bin.end) {
		ret = drm_sched_job_init(&exec->bin.base,
					 &v3d->queue[V3D_BIN].sched,
					 &v3d_priv->sched_entity[V3D_BIN],
					 v3d_priv);
		if (ret)
			goto fail_unreserve;

		exec->bin_done_fence =
			dma_fence_get(&exec->bin.base.s_fence->finished);

		kref_get(&exec->refcount); /* put by scheduler job completion */
		drm_sched_entity_push_job(&exec->bin.base,
					  &v3d_priv->sched_entity[V3D_BIN]);
	}

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d->queue[V3D_RENDER].sched,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);

	v3d_attach_object_fences(exec);

	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);

	/* Update the return sync object for the job. */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out,
					  &exec->render.base.s_fence->finished);
		drm_syncobj_put(sync_out);
	}

	v3d_exec_put(exec);

	return 0;

fail_unreserve:
	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
	v3d_exec_put(exec);

	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
				  v3d->pt_paddr);
	}

return 0;
|
||||
}
|
||||
|
||||
void
|
||||
v3d_gem_destroy(struct drm_device *dev)
|
||||
{
|
||||
struct v3d_dev *v3d = to_v3d_dev(dev);
|
||||
enum v3d_queue q;
|
||||
|
||||
v3d_sched_fini(v3d);
|
||||
|
||||
/* Waiting for exec to finish would need to be done before
|
||||
* unregistering V3D.
|
||||
*/
|
||||
for (q = 0; q < V3D_MAX_QUEUES; q++) {
|
||||
WARN_ON(v3d->queue[q].emit_seqno !=
|
||||
v3d->queue[q].finished_seqno);
|
||||
}
|
||||
|
||||
drm_mm_takedown(&v3d->mm);
|
||||
|
||||
dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
|
||||
}
|
|
@ -0,0 +1,206 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP))

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(&bo->base);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
	drm_gem_object_put_unlocked(&bo->base);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		v3d->queue[V3D_BIN].finished_seqno++;
		dma_fence_signal(v3d->bin_job->bin.done_fence);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		v3d->queue[V3D_RENDER].finished_seqno++;
		dma_fence_signal(v3d->render_job->render.done_fence);

		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
	int ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
			       v3d_hub_irq, IRQF_SHARED,
			       "v3d_hub", v3d);
	if (ret)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
			       v3d_irq, IRQF_SHARED,
			       "v3d_core0", v3d);
	if (ret)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);

	v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}

@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

/**
 * DOC: Broadcom V3D MMU
 *
 * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
 * a single level of page tables for the V3D's 4GB address space to
 * map to AXI bus addresses, thus it could need up to 4MB of
 * physically contiguous memory to store the PTEs.
 *
 * Because the 4MB of contiguous memory for page tables is precious,
 * and switching between them is expensive, we load all BOs into the
 * same 4GB address space.
 *
 * To protect clients from each other, we should use the GMP to
 * quickly mask out (at 128kb granularity) what pages are available to
 * each client.  This is not yet implemented.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_MMU_PAGE_SHIFT 12

/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
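
/* A valid, writeable PTE for DMA address "addr" is thus
 *
 *	(addr >> V3D_MMU_PAGE_SHIFT) | V3D_PTE_WRITEABLE | V3D_PTE_VALID
 *
 * which is exactly the encoding v3d_mmu_insert_ptes() below writes
 * into the page table.
 */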

static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure that another flush isn't already running when we
	 * start this one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");

	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);

	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->dev, "TLB clear wait idle failed\n");
		return ret;
	}

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->dev, "MMUC flush wait idle failed\n");

	return ret;
}

int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT);
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	unsigned int count;
	struct scatterlist *sgl;

	for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
		u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;

		BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));

		for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

	WARN_ON_ONCE(page - bo->node.start !=
		     bo->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->dev, "MMU flush timeout\n");
}

void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
	u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->dev, "MMU flush timeout\n");
}

@ -0,0 +1,295 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

#ifndef V3D_REGS_H
#define V3D_REGS_H

#include <linux/bitops.h>

#define V3D_MASK(high, low) ((u32)GENMASK(high, low))
/* Using the GNU statement expression extension */
#define V3D_SET_FIELD(value, field)				\
	({							\
		u32 fieldval = (value) << field##_SHIFT;	\
		WARN_ON((fieldval & ~field##_MASK) != 0);	\
		fieldval & field##_MASK;			\
	})

#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \
				    field##_SHIFT)
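
/* A usage sketch (illustrative only; V3D_READ() comes from v3d_drv.h):
 *
 *	u32 ident1 = V3D_READ(V3D_HUB_IDENT1);
 *	int ncores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
 *	u32 packed = V3D_SET_FIELD(ncores, V3D_HUB_IDENT1_NCORES);
 *
 * V3D_GET_FIELD() extracts a field from a register word, and
 * V3D_SET_FIELD() packs (and range-checks) a value back into it.
 */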

/* Hub registers for shared hardware between V3D cores. */

#define V3D_HUB_AXICFG 0x00000
# define V3D_HUB_AXICFG_MAX_LEN_MASK V3D_MASK(3, 0)
# define V3D_HUB_AXICFG_MAX_LEN_SHIFT 0
#define V3D_HUB_UIFCFG 0x00004
#define V3D_HUB_IDENT0 0x00008

#define V3D_HUB_IDENT1 0x0000c
# define V3D_HUB_IDENT1_WITH_MSO BIT(19)
# define V3D_HUB_IDENT1_WITH_TSY BIT(18)
# define V3D_HUB_IDENT1_WITH_TFU BIT(17)
# define V3D_HUB_IDENT1_WITH_L3C BIT(16)
# define V3D_HUB_IDENT1_NHOSTS_MASK V3D_MASK(15, 12)
# define V3D_HUB_IDENT1_NHOSTS_SHIFT 12
# define V3D_HUB_IDENT1_NCORES_MASK V3D_MASK(11, 8)
# define V3D_HUB_IDENT1_NCORES_SHIFT 8
# define V3D_HUB_IDENT1_REV_MASK V3D_MASK(7, 4)
# define V3D_HUB_IDENT1_REV_SHIFT 4
# define V3D_HUB_IDENT1_TVER_MASK V3D_MASK(3, 0)
# define V3D_HUB_IDENT1_TVER_SHIFT 0

#define V3D_HUB_IDENT2 0x00010
# define V3D_HUB_IDENT2_WITH_MMU BIT(8)
# define V3D_HUB_IDENT2_L3C_NKB_MASK V3D_MASK(7, 0)
# define V3D_HUB_IDENT2_L3C_NKB_SHIFT 0

#define V3D_HUB_IDENT3 0x00014
# define V3D_HUB_IDENT3_IPREV_MASK V3D_MASK(15, 8)
# define V3D_HUB_IDENT3_IPREV_SHIFT 8
# define V3D_HUB_IDENT3_IPIDX_MASK V3D_MASK(7, 0)
# define V3D_HUB_IDENT3_IPIDX_SHIFT 0

#define V3D_HUB_INT_STS 0x00050
#define V3D_HUB_INT_SET 0x00054
#define V3D_HUB_INT_CLR 0x00058
#define V3D_HUB_INT_MSK_STS 0x0005c
#define V3D_HUB_INT_MSK_SET 0x00060
#define V3D_HUB_INT_MSK_CLR 0x00064
# define V3D_HUB_INT_MMU_WRV BIT(5)
# define V3D_HUB_INT_MMU_PTI BIT(4)
# define V3D_HUB_INT_MMU_CAP BIT(3)
# define V3D_HUB_INT_MSO BIT(2)
# define V3D_HUB_INT_TFUC BIT(1)
# define V3D_HUB_INT_TFUF BIT(0)

#define V3D_GCA_CACHE_CTRL 0x0000c
# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0)

#define V3D_GCA_SAFE_SHUTDOWN 0x000b0
# define V3D_GCA_SAFE_SHUTDOWN_EN BIT(0)

#define V3D_GCA_SAFE_SHUTDOWN_ACK 0x000b4
# define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED 3

# define V3D_TOP_GR_BRIDGE_REVISION 0x00000
# define V3D_TOP_GR_BRIDGE_MAJOR_MASK V3D_MASK(15, 8)
# define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT 8
# define V3D_TOP_GR_BRIDGE_MINOR_MASK V3D_MASK(7, 0)
# define V3D_TOP_GR_BRIDGE_MINOR_SHIFT 0

/* 7268 reset reg */
# define V3D_TOP_GR_BRIDGE_SW_INIT_0 0x00008
# define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0)
/* 7278 reset reg */
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)

/* Per-MMU registers. */

#define V3D_MMUC_CONTROL 0x01000
# define V3D_MMUC_CONTROL_CLEAR BIT(3)
# define V3D_MMUC_CONTROL_FLUSHING BIT(2)
# define V3D_MMUC_CONTROL_FLUSH BIT(1)
# define V3D_MMUC_CONTROL_ENABLE BIT(0)

#define V3D_MMU_CTL 0x01200
# define V3D_MMU_CTL_CAP_EXCEEDED BIT(27)
# define V3D_MMU_CTL_CAP_EXCEEDED_ABORT BIT(26)
# define V3D_MMU_CTL_CAP_EXCEEDED_INT BIT(25)
# define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION BIT(24)
# define V3D_MMU_CTL_PT_INVALID BIT(20)
# define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19)
# define V3D_MMU_CTL_PT_INVALID_INT BIT(18)
# define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17)
# define V3D_MMU_CTL_WRITE_VIOLATION BIT(16)
# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11)
# define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10)
# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9)
# define V3D_MMU_CTL_TLB_CLEARING BIT(7)
# define V3D_MMU_CTL_TLB_STATS_CLEAR BIT(3)
# define V3D_MMU_CTL_TLB_CLEAR BIT(2)
# define V3D_MMU_CTL_TLB_STATS_ENABLE BIT(1)
# define V3D_MMU_CTL_ENABLE BIT(0)

#define V3D_MMU_PT_PA_BASE 0x01204
#define V3D_MMU_HIT 0x01208
#define V3D_MMU_MISSES 0x0120c
#define V3D_MMU_STALLS 0x01210

#define V3D_MMU_ADDR_CAP 0x01214
# define V3D_MMU_ADDR_CAP_ENABLE BIT(31)
# define V3D_MMU_ADDR_CAP_MPAGE_MASK V3D_MASK(11, 0)
# define V3D_MMU_ADDR_CAP_MPAGE_SHIFT 0

#define V3D_MMU_SHOOT_DOWN 0x01218
# define V3D_MMU_SHOOT_DOWN_SHOOTING BIT(29)
# define V3D_MMU_SHOOT_DOWN_SHOOT BIT(28)
# define V3D_MMU_SHOOT_DOWN_PAGE_MASK V3D_MASK(27, 0)
# define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT 0

#define V3D_MMU_BYPASS_START 0x0121c
#define V3D_MMU_BYPASS_END 0x01220

/* AXI ID of the access that faulted */
#define V3D_MMU_VIO_ID 0x0122c

/* Address for illegal PTEs to return */
#define V3D_MMU_ILLEGAL_ADDR 0x01230
# define V3D_MMU_ILLEGAL_ADDR_ENABLE BIT(31)

/* Address that faulted */
#define V3D_MMU_VIO_ADDR 0x01234

/* Per-V3D-core registers */

#define V3D_CTL_IDENT0 0x00000
# define V3D_IDENT0_VER_MASK V3D_MASK(31, 24)
# define V3D_IDENT0_VER_SHIFT 24

#define V3D_CTL_IDENT1 0x00004
/* Multiples of 1kb */
# define V3D_IDENT1_VPM_SIZE_MASK V3D_MASK(31, 28)
# define V3D_IDENT1_VPM_SIZE_SHIFT 28
# define V3D_IDENT1_NSEM_MASK V3D_MASK(23, 16)
# define V3D_IDENT1_NSEM_SHIFT 16
# define V3D_IDENT1_NTMU_MASK V3D_MASK(15, 12)
# define V3D_IDENT1_NTMU_SHIFT 12
# define V3D_IDENT1_QUPS_MASK V3D_MASK(11, 8)
# define V3D_IDENT1_QUPS_SHIFT 8
# define V3D_IDENT1_NSLC_MASK V3D_MASK(7, 4)
# define V3D_IDENT1_NSLC_SHIFT 4
# define V3D_IDENT1_REV_MASK V3D_MASK(3, 0)
# define V3D_IDENT1_REV_SHIFT 0

#define V3D_CTL_IDENT2 0x00008
# define V3D_IDENT2_BCG_INT BIT(28)

#define V3D_CTL_MISCCFG 0x00018
# define V3D_MISCCFG_OVRTMUOUT BIT(0)

#define V3D_CTL_L2CACTL 0x00020
# define V3D_L2CACTL_L2CCLR BIT(2)
# define V3D_L2CACTL_L2CDIS BIT(1)
# define V3D_L2CACTL_L2CENA BIT(0)

#define V3D_CTL_SLCACTL 0x00024
# define V3D_SLCACTL_TVCCS_MASK V3D_MASK(27, 24)
# define V3D_SLCACTL_TVCCS_SHIFT 24
# define V3D_SLCACTL_TDCCS_MASK V3D_MASK(19, 16)
# define V3D_SLCACTL_TDCCS_SHIFT 16
# define V3D_SLCACTL_UCC_MASK V3D_MASK(11, 8)
# define V3D_SLCACTL_UCC_SHIFT 8
# define V3D_SLCACTL_ICC_MASK V3D_MASK(3, 0)
# define V3D_SLCACTL_ICC_SHIFT 0

#define V3D_CTL_L2TCACTL 0x00030
# define V3D_L2TCACTL_TMUWCF BIT(8)
# define V3D_L2TCACTL_L2T_NO_WM BIT(4)
# define V3D_L2TCACTL_FLM_FLUSH 0
# define V3D_L2TCACTL_FLM_CLEAR 1
# define V3D_L2TCACTL_FLM_CLEAN 2
# define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1)
# define V3D_L2TCACTL_FLM_SHIFT 1
# define V3D_L2TCACTL_L2TFLS BIT(0)
#define V3D_CTL_L2TFLSTA 0x00034
#define V3D_CTL_L2TFLEND 0x00038

#define V3D_CTL_INT_STS 0x00050
#define V3D_CTL_INT_SET 0x00054
#define V3D_CTL_INT_CLR 0x00058
#define V3D_CTL_INT_MSK_STS 0x0005c
#define V3D_CTL_INT_MSK_SET 0x00060
#define V3D_CTL_INT_MSK_CLR 0x00064
# define V3D_INT_QPU_MASK V3D_MASK(27, 16)
# define V3D_INT_QPU_SHIFT 16
# define V3D_INT_GMPV BIT(5)
# define V3D_INT_TRFB BIT(4)
# define V3D_INT_SPILLUSE BIT(3)
# define V3D_INT_OUTOMEM BIT(2)
# define V3D_INT_FLDONE BIT(1)
# define V3D_INT_FRDONE BIT(0)

#define V3D_CLE_CT0CS 0x00100
#define V3D_CLE_CT1CS 0x00104
#define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n)
#define V3D_CLE_CT0EA 0x00108
#define V3D_CLE_CT1EA 0x0010c
#define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n)
#define V3D_CLE_CT0CA 0x00110
#define V3D_CLE_CT1CA 0x00114
#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
#define V3D_CLE_CT0RA 0x00118
#define V3D_CLE_CT1RA 0x0011c
#define V3D_CLE_CT0LC 0x00120
#define V3D_CLE_CT1LC 0x00124
#define V3D_CLE_CT0PC 0x00128
#define V3D_CLE_CT1PC 0x0012c
#define V3D_CLE_PCS 0x00130
#define V3D_CLE_BFC 0x00134
#define V3D_CLE_RFC 0x00138
#define V3D_CLE_TFBC 0x0013c
#define V3D_CLE_TFIT 0x00140
#define V3D_CLE_CT1CFG 0x00144
#define V3D_CLE_CT1TILECT 0x00148
#define V3D_CLE_CT1TSKIP 0x0014c
#define V3D_CLE_CT1PTCT 0x00150
#define V3D_CLE_CT0SYNC 0x00154
#define V3D_CLE_CT1SYNC 0x00158
#define V3D_CLE_CT0QTS 0x0015c
# define V3D_CLE_CT0QTS_ENABLE BIT(1)
#define V3D_CLE_CT0QBA 0x00160
#define V3D_CLE_CT1QBA 0x00164
#define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n)
#define V3D_CLE_CT0QEA 0x00168
#define V3D_CLE_CT1QEA 0x0016c
#define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n)
#define V3D_CLE_CT0QMA 0x00170
#define V3D_CLE_CT0QMS 0x00174
#define V3D_CLE_CT1QCFG 0x00178
/* If set without ETPROC, entirely skip tiles with no primitives. */
# define V3D_CLE_QCFG_ETFILT BIT(7)
/* If set with ETFILT, just write the clear color to tiles with no
 * primitives.
 */
# define V3D_CLE_QCFG_ETPROC BIT(6)
# define V3D_CLE_QCFG_ETSFLUSH BIT(1)
# define V3D_CLE_QCFG_MCDIS BIT(0)

#define V3D_PTB_BPCA 0x00300
#define V3D_PTB_BPCS 0x00304
#define V3D_PTB_BPOA 0x00308
#define V3D_PTB_BPOS 0x0030c

#define V3D_PTB_BXCF 0x00310
# define V3D_PTB_BXCF_RWORDERDISA BIT(1)
# define V3D_PTB_BXCF_CLIPDISA BIT(0)

#define V3D_GMP_STATUS 0x00800
# define V3D_GMP_STATUS_GMPRST BIT(31)
# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24)
# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24
# define V3D_GMP_STATUS_RD_COUNT_MASK V3D_MASK(22, 16)
# define V3D_GMP_STATUS_RD_COUNT_SHIFT 16
# define V3D_GMP_STATUS_WR_ACTIVE BIT(5)
# define V3D_GMP_STATUS_RD_ACTIVE BIT(4)
# define V3D_GMP_STATUS_CFG_BUSY BIT(3)
# define V3D_GMP_STATUS_CNTOVF BIT(2)
# define V3D_GMP_STATUS_INVPROT BIT(1)
# define V3D_GMP_STATUS_VIO BIT(0)

#define V3D_GMP_CFG 0x00804
# define V3D_GMP_CFG_LBURSTEN BIT(3)
# define V3D_GMP_CFG_PGCRSEN BIT(2)
# define V3D_GMP_CFG_STOP_REQ BIT(1)
# define V3D_GMP_CFG_PROT_ENABLE BIT(0)

#define V3D_GMP_VIO_ADDR 0x00808
#define V3D_GMP_VIO_TYPE 0x0080c
#define V3D_GMP_TABLE_ADDR 0x00810
#define V3D_GMP_CLEAR_LOAD 0x00814
#define V3D_GMP_PRESERVE_LOAD 0x00818
#define V3D_GMP_VALID_LINES 0x00820

#endif /* V3D_REGS_H */

@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware.  Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order.  The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
 * v3d_job_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/kthread.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static void
v3d_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_exec_put(job->exec);
}

/**
 * Returns the fences that the bin or render job depends on, one by one.
 * v3d_job_run() won't be called until all of them have been signaled.
 */
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
		   struct drm_sched_entity *s_entity)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct dma_fence *fence;

	fence = job->in_fence;
	if (fence) {
		job->in_fence = NULL;
		return fence;
	}

	if (q == V3D_RENDER) {
		/* If we had a bin job, the render job definitely depends on
		 * it. We first have to wait for bin to be scheduled, so that
		 * its done_fence is created.
		 */
		fence = exec->bin_done_fence;
		if (fence) {
			exec->bin_done_fence = NULL;
			return fence;
		}
	}

	/* XXX: Wait on a fence for switching the GMP if necessary,
	 * and then do so.
	 */

	return fence;
}

static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct v3d_dev *v3d = exec->v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (q == V3D_BIN) {
		v3d->bin_job = job->exec;

		/* Clear out the overflow allocation, so we don't
		 * reuse the overflow attached to a previous job.
		 */
		V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	} else {
		v3d->render_job = job->exec;
	}
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	/* Can we avoid this flush when q==RENDER?  We need to be
	 * careful of scheduling, though -- imagine job0 rendering to
	 * texture and job1 reading, and them being executed as bin0,
	 * bin1, render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, q);
	if (!fence)
		return fence;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	if (q == V3D_BIN) {
		if (exec->qma) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
		}
		if (exec->qts) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
				       V3D_CLE_CT0QTS_ENABLE |
				       exec->qts);
		}
	} else {
		/* XXX: Set the QCFG */
	}

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);

	return fence;
}

static void
v3d_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	struct v3d_dev *v3d = exec->v3d;
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;

		kthread_park(sched->thread);
		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
					       sched_job : NULL));
	}

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_job_recovery(&v3d->queue[q].sched);
		kthread_unpark(v3d->queue[q].sched.thread);
	}

	mutex_unlock(&v3d->reset_lock);
}

static const struct drm_sched_backend_ops v3d_sched_ops = {
	.dependency = v3d_job_dependency,
	.run_job = v3d_job_run,
	.timedout_job = v3d_job_timedout,
	.free_job = v3d_job_free
};

int
v3d_sched_init(struct v3d_dev *v3d)
{
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;
	int ret;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_bin");
	if (ret) {
		dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
		return ret;
	}

	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_render");
	if (ret) {
		dev_err(v3d->dev, "Failed to create render scheduler: %d.",
			ret);
		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
		return ret;
	}

	return 0;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_fini(&v3d->queue[q].sched);
}

@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _V3D_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM v3d
#define TRACE_INCLUDE_FILE v3d_trace

TRACE_EVENT(v3d_submit_cl,
	    TP_PROTO(struct drm_device *dev, bool is_render,
		     uint64_t seqno,
		     u32 ctnqba, u32 ctnqea),
	    TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(bool, is_render)
			     __field(u64, seqno)
			     __field(u32, ctnqba)
			     __field(u32, ctnqea)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->is_render = is_render;
			   __entry->seqno = seqno;
			   __entry->ctnqba = ctnqba;
			   __entry->ctnqea = ctnqea;
			   ),

	    TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x",
		      __entry->dev,
		      __entry->is_render ? "RCL" : "BCL",
		      __entry->seqno,
		      __entry->ctnqba,
		      __entry->ctnqea)
);
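
/* The scheduler emits this event as it kicks off a control list; the
 * call site in v3d_sched.c looks like:
 *
 *	trace_v3d_submit_cl(dev, q == V3D_RENDER,
 *			    to_v3d_fence(fence)->seqno,
 *			    job->start, job->end);
 */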

TRACE_EVENT(v3d_reset_begin,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%u",
		      __entry->dev)
);

TRACE_EVENT(v3d_reset_end,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%u",
		      __entry->dev)
);

#endif /* _V3D_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015 Broadcom */

#include "v3d_drv.h"

#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "v3d_trace.h"
#endif

@ -175,7 +175,8 @@ static struct drm_driver vc4_drm_driver = {
			    DRIVER_GEM |
			    DRIVER_HAVE_IRQ |
			    DRIVER_RENDER |
			    DRIVER_PRIME),
			    DRIVER_PRIME |
			    DRIVER_SYNCOBJ),
	.lastclose = drm_fb_helper_lastclose,
	.open = vc4_open,
	.postclose = vc4_close,

@ -11,6 +11,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

@ -753,6 +753,11 @@ static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
			   (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
			   (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
	int ret;
	bool ulps_currently_enabled = (DSI_PORT_READ(PHY_AFEC0) &
				       DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS));

	if (ulps == ulps_currently_enabled)
		return;

	DSI_PORT_WRITE(STAT, stat_ulps);
	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);

@ -27,6 +27,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@ -655,7 +656,8 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
@ -678,6 +680,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
@ -1113,8 +1118,10 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
@ -1126,7 +1133,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("->pad2 must be set to zero\n");
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

@ -1164,6 +1171,29 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
@ -1181,12 +1211,33 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

@ -218,8 +218,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
 * overall CMA pool before they make scenes complicated enough to run
 * out of bin space.
 */
int
vc4_allocate_bin_bo(struct drm_device *drm)
static int vc4_allocate_bin_bo(struct drm_device *drm)
{
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = vc4->v3d;
@ -159,14 +159,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
@ -188,8 +188,8 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
	if (!shbuf)
		return -ENOMEM;
	if (IS_ERR(shbuf))
		return PTR_ERR(shbuf);

	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
	if (ret < 0) {
@ -543,8 +543,8 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (!drm_dev) {
		ret = -ENOMEM;
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

@ -778,7 +778,7 @@ static int xen_drv_remove(struct xenbus_device *dev)
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
	       to--)
	       --to)
		msleep(10);

	if (!to) {
@ -383,7 +383,7 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;
		return ERR_PTR(-ENOMEM);

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
@ -93,6 +93,9 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
	if (size < length)
		return -ENOSPC;

	if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
		return -EINVAL;

	memset(buffer, 0, size);

	ptr[0] = frame->type;
@ -38,7 +38,6 @@ struct drm_device {
	struct device *dev;		/**< Device structure of bus-device */
	struct drm_driver *driver;	/**< DRM driver managing the device */
	void *dev_private;		/**< DRM driver private data */
	struct drm_minor *control;	/**< Control node */
	struct drm_minor *primary;	/**< Primary node */
	struct drm_minor *render;	/**< Render node */
	bool registered;

@ -64,6 +64,11 @@
/* AUX CH addresses */
/* DPCD */
#define DP_DPCD_REV 0x000
# define DP_DPCD_REV_10 0x10
# define DP_DPCD_REV_11 0x11
# define DP_DPCD_REV_12 0x12
# define DP_DPCD_REV_13 0x13
# define DP_DPCD_REV_14 0x14
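
/* An illustrative DPCD revision check, assuming the drm_dp_dpcd_readb()
 * helper declared later in this header (reads return the number of
 * bytes transferred):
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev) == 1 &&
 *	    rev >= DP_DPCD_REV_12) {
 *		// sink implements DPCD revision 1.2 or later
 *	}
 */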

#define DP_MAX_LINK_RATE 0x001

@ -119,6 +124,7 @@
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */

#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */

#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
@ -977,18 +983,18 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw);
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
/* 0x80+ CEA-861 infoframe types */

struct edp_sdp_header {
struct dp_sdp_header {
	u8 HB0; /* Secondary Data Packet ID */
	u8 HB1; /* Secondary Data Packet Type */
	u8 HB2; /* 7:5 reserved, 4:0 revision number */
	u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
	u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */
	u8 HB3; /* Secondary Data Packet Specific header, Byte 1 */
} __packed;

#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F

struct edp_vsc_psr {
	struct edp_sdp_header sdp_header;
	struct dp_sdp_header sdp_header;
	u8 DB0; /* Stereo Interface */
	u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
	u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
@ -47,6 +47,9 @@ struct device;
 * header include loops we need it here for now.
 */

/* Note that the order of this enum is ABI (it determines
 * /dev/dri/renderD* numbers).
 */
enum drm_minor_type {
	DRM_MINOR_PRIMARY,
	DRM_MINOR_CONTROL,
@ -181,6 +184,14 @@ struct drm_file {
	/** @atomic: True if client understands atomic properties. */
	unsigned atomic:1;

	/**
	 * @aspect_ratio_allowed:
	 *
	 * True, if client can handle picture aspect ratios, and has requested
	 * to pass this information along with the mode.
	 */
	unsigned aspect_ratio_allowed:1;

	/**
	 * @is_master:
	 *
@ -348,18 +359,6 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
	return file_priv->minor->type == DRM_MINOR_RENDER;
}

/**
 * drm_is_control_client - is this an open file of the control node
 * @file_priv: DRM file
 *
 * Control nodes are deprecated and in the process of getting removed from the
 * DRM userspace API. Do not ever use!
 */
static inline bool drm_is_control_client(const struct drm_file *file_priv)
{
	return file_priv->minor->type == DRM_MINOR_CONTROL;
}

int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset);

@ -108,13 +108,6 @@ enum drm_ioctl_flags {
	 * This is equivalent to callers with the SYSADMIN capability.
	 */
	DRM_ROOT_ONLY = BIT(2),
	/**
	 * @DRM_CONTROL_ALLOW:
	 *
	 * Deprecated, do not use. Control nodes are in the process of getting
	 * removed.
	 */
	DRM_CONTROL_ALLOW = BIT(3),
	/**
	 * @DRM_UNLOCKED:
	 *

@ -147,6 +147,12 @@ enum drm_mode_status {

#define DRM_MODE_FLAG_3D_MAX	DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF

#define DRM_MODE_MATCH_TIMINGS (1 << 0)
#define DRM_MODE_MATCH_CLOCK (1 << 1)
#define DRM_MODE_MATCH_FLAGS (1 << 2)
#define DRM_MODE_MATCH_3D_FLAGS (1 << 3)
#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4)
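
/* These flags pick which aspects drm_mode_match(), declared further
 * below, compares.  An illustrative call:
 *
 *	bool same = drm_mode_match(mode1, mode2,
 *				   DRM_MODE_MATCH_TIMINGS |
 *				   DRM_MODE_MATCH_CLOCK);
 */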

/**
 * struct drm_display_mode - DRM kernel-internal display mode structure
 * @hdisplay: horizontal display size
@ -405,6 +411,19 @@ struct drm_display_mode {
	 * Field for setting the HDMI picture aspect ratio of a mode.
	 */
	enum hdmi_picture_aspect picture_aspect_ratio;

	/**
	 * @export_head:
	 *
	 * struct list_head for modes to be exposed to the userspace.
	 * This is to maintain a list of exposed modes while preparing
	 * user-mode's list in drm_mode_getconnector ioctl. The purpose of this
	 * list_head only lies in the ioctl function, and is not expected to be
	 * used outside the function.
	 * Once used, the stale pointers are not reset, but left as-is, to
	 * avoid the overhead of protecting it with mode_config.mutex.
	 */
	struct list_head export_head;
};

/**
@ -490,6 +509,9 @@ void drm_mode_copy(struct drm_display_mode *dst,
		   const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
					    const struct drm_display_mode *mode);
bool drm_mode_match(const struct drm_display_mode *mode1,
		    const struct drm_display_mode *mode2,
		    unsigned int match_flags);
bool drm_mode_equal(const struct drm_display_mode *mode1,
		    const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
@ -260,7 +260,7 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
					 uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev,
					      u32 flags, const char *name);
int drm_property_add_enum(struct drm_property *property, int index,
int drm_property_add_enum(struct drm_property *property,
			  uint64_t value, const char *name);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property);

@ -175,8 +175,7 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,

bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
			  const struct drm_rect *clip,
			  int hscale, int vscale);
			  const struct drm_rect *clip);
int drm_rect_calc_hscale(const struct drm_rect *src,
			 const struct drm_rect *dst,
			 int min_hscale, int max_hscale);
@ -94,11 +94,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback
 * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback, additional
 * This struct will be initialized by dma_fence_add_callback(), additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
@ -108,75 +108,143 @@ struct dma_fence_cb {

/**
 * struct dma_fence_ops - operations implemented for fence
 * @get_driver_name: returns the driver name.
 * @get_timeline_name: return the name of the context this fence belongs to.
 * @enable_signaling: enable software signaling of fence.
 * @signaled: [optional] peek whether the fence is signaled, can be null.
 * @wait: custom wait implementation, or dma_fence_default_wait.
 * @release: [optional] called on destruction of fence, can be null
 * @fill_driver_data: [optional] callback to fill in free-form debug info
 *		      Returns amount of bytes filled, or -errno.
 * @fence_value_str: [optional] fills in the value of the fence as a string
 * @timeline_value_str: [optional] fills in the current value of the timeline
 *			as a string
 *
 * Notes on enable_signaling:
 * For fence implementations that have the capability for hw->hw
 * signaling, they can implement this op to enable the necessary
 * irqs, or insert commands into cmdstream, etc.  This is called
 * in the first wait() or add_callback() path to let the fence
 * implementation know that there is another driver waiting on
 * the signal (ie. hw->sw case).
 *
 * This function can be called from atomic context, but not
 * from irq context, so normal spinlocks can be used.
 *
 * A return value of false indicates the fence already passed,
 * or some failure occurred that made it impossible to enable
 * signaling. True indicates successful enabling.
 *
 * fence->error may be set in enable_signaling, but only when false is
 * returned.
 *
 * Calling dma_fence_signal before enable_signaling is called allows
 * for a tiny race window in which enable_signaling is called during,
 * before, or after dma_fence_signal. To fight this, it is recommended
 * that before enable_signaling returns true an extra reference is
 * taken on the fence, to be released when the fence is signaled.
 * This will mean dma_fence_signal will still be called twice, but
 * the second time will be a noop since it was already signaled.
 *
 * Notes on signaled:
 * May set fence->error if returning true.
 *
 * Notes on wait:
 * Must not be NULL, set to dma_fence_default_wait for default implementation.
 * The dma_fence_default_wait implementation should work for any fence, as long
 * as enable_signaling works correctly.
 *
 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
 * timed out. Can also return other error values on custom implementations,
 * which should be treated as if the fence is signaled. For example a hardware
 * lockup could be reported like that.
 *
 * Notes on release:
 * Can be NULL, this function allows additional commands to run on
 * destruction of the fence. Can be called from irq context.
 * If pointer is set to NULL, kfree will get called instead.
 */

struct dma_fence_ops {
	/**
	 * @get_driver_name:
	 *
	 * Returns the driver name. This is a callback to allow drivers to
	 * compute the name at runtime, without having to store it permanently
	 * for each fence, or build a cache of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_driver_name)(struct dma_fence *fence);

	/**
	 * @get_timeline_name:
	 *
	 * Return the name of the context this fence belongs to. This is a
	 * callback to allow drivers to compute the name at runtime, without
	 * having to store it permanently for each fence, or build a cache of
	 * some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_timeline_name)(struct dma_fence *fence);

	/**
	 * @enable_signaling:
	 *
	 * Enable software signaling of fence.
	 *
	 * For fence implementations that have the capability for hw->hw
	 * signaling, they can implement this op to enable the necessary
	 * interrupts, or insert commands into cmdstream, etc, to avoid these
	 * costly operations for the common case where only hw->hw
	 * synchronization is required. This is called in the first
	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
	 * implementation know that there is another driver waiting on the
	 * signal (ie. hw->sw case).
	 *
	 * This function can be called from atomic context, but not
	 * from irq context, so normal spinlocks can be used.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
	 * signaling. True indicates successful enabling.
	 *
	 * &dma_fence.error may be set in enable_signaling, but only when false
	 * is returned.
	 *
	 * Since many implementations can call dma_fence_signal() even before
	 * @enable_signaling has been called there's a race window, where the
	 * dma_fence_signal() might result in the final fence reference being
	 * released and its memory freed. To avoid this, implementations of this
	 * callback should grab their own reference using dma_fence_get(), to be
	 * released when the fence is signalled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is mandatory.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);
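
	/* A minimal sketch of the pattern described above (illustrative
	 * only; struct my_fence and my_hw_arm_irq() are hypothetical):
	 *
	 *	static bool my_enable_signaling(struct dma_fence *fence)
	 *	{
	 *		struct my_fence *f =
	 *			container_of(fence, struct my_fence, base);
	 *
	 *		dma_fence_get(fence);	// extra ref, dropped on signal
	 *		my_hw_arm_irq(f);	// arm the hw->sw notification
	 *		return true;
	 *	}
	 */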

	/**
	 * @signaled:
	 *
	 * Peek whether the fence is signaled, as a fastpath optimization for
	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
	 * callback does not need to make any guarantees beyond that a fence
	 * once indicated as signalled must always return true from this
	 * callback. This callback may return false even if the fence has
	 * completed already, in this case information hasn't propagated
	 * through the system yet. See also dma_fence_is_signaled().
	 *
	 * May set &dma_fence.error if returning true.
	 *
	 * This callback is optional.
	 */
	bool (*signaled)(struct dma_fence *fence);

	/**
	 * @wait:
	 *
	 * Custom wait implementation, or dma_fence_default_wait.
	 *
	 * Must not be NULL, set to dma_fence_default_wait for the default
	 * implementation. The dma_fence_default_wait implementation should
	 * work for any fence, as long as enable_signaling works correctly.
	 *
	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
	 * interrupted, and remaining jiffies if the fence has signaled, or 0
	 * if the wait timed out. Can also return other error values on custom
	 * implementations, which should be treated as if the fence is
	 * signaled. For example a hardware lockup could be reported like that.
	 *
	 * This callback is mandatory.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);

	/**
	 * @release:
	 *
	 * Called on destruction of fence to release additional resources.
	 * Can be called from irq context. This callback is optional. If it is
	 * NULL, then dma_fence_free() is instead called as the default
	 * implementation.
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @fill_driver_data:
	 *
	 * Callback to fill in free-form debug info.
	 *
	 * Returns amount of bytes filled, or negative error on failure.
	 *
	 * This callback is optional.
	 */
	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);

	/**
	 * @fence_value_str:
	 *
	 * Callback to fill in free-form debug info specific to this fence, like
	 * the sequence number.
	 *
	 * This callback is optional.
	 */
	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

	/**
	 * @timeline_value_str:
	 *
	 * Fills in the current value of the timeline as a string, like the
	 * sequence number. This should match what @fill_driver_data prints for
	 * the most recently signalled fence (assuming no delayed signalling).
	 */
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);
};

@ -189,7 +257,7 @@ void dma_fence_free(struct dma_fence *fence);

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: [in] fence to reduce refcount of
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
@ -199,7 +267,7 @@ static inline void dma_fence_put(struct dma_fence *fence)

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: [in] fence to increase refcount of
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
@@ -213,7 +281,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)

 /**
  * dma_fence_get_rcu - get a fence from a reservation_object_list with
  * rcu read lock
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  */
@@ -227,7 +295,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)

 /**
  * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
- * @fencep: [in] pointer to fence to increase refcount of
+ * @fencep: pointer to fence to increase refcount of
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  * This function handles acquiring a reference to a fence that may be
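As a usage sketch (assumed caller code, not part of this patch), the
RCU-safe variant is typically used under the RCU read lock, with "obj"
being a hypothetical structure holding an RCU-managed fence pointer:

	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence);
	rcu_read_unlock();

	if (fence) {
		/* safe to use: we now hold a full reference */
		dma_fence_put(fence);
	}
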
@@ -289,14 +357,16 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);

 /**
  * dma_fence_is_signaled_locked - Return an indication if the fence
  * is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
  *
  * Returns true if the fence was already signaled, false if not. Since this
  * function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
  *
- * This function requires fence->lock to be held.
+ * This function requires &dma_fence.lock to be held.
+ *
+ * See also dma_fence_is_signaled().
  */
 static inline bool
 dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -314,17 +384,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)

 /**
  * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
  *
  * Returns true if the fence was already signaled, false if not. Since this
  * function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
  *
  * It's recommended for seqno fences to call dma_fence_signal when the
  * operation is complete; this makes it possible to prevent issues from
  * wraparound between time of issue and time of use by checking the return
  * value of this function before calling hardware-specific wait instructions.
+ *
+ * See also dma_fence_is_signaled_locked().
  */
 static inline bool
 dma_fence_is_signaled(struct dma_fence *fence)
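A short sketch of the recommended pattern (assumed caller code, not part of
this patch): check the fastpath before falling back to a blocking wait.

	if (!dma_fence_is_signaled(fence)) {
		signed long ret = dma_fence_wait(fence, true);	/* interruptible */

		if (ret == -ERESTARTSYS)
			return ret;	/* interrupted by a signal */
	}
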
@@ -342,8 +414,8 @@ dma_fence_is_signaled(struct dma_fence *fence)

 /**
  * __dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence's seqno
- * @f2: [in] the second fence's seqno from the same context
+ * @f1: the first fence's seqno
+ * @f2: the second fence's seqno from the same context
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not common across contexts.
@@ -355,8 +427,8 @@ static inline bool __dma_fence_is_later(u32 f1, u32 f2)

 /**
  * dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not re-used across contexts.
@@ -372,8 +444,8 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,

 /**
  * dma_fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
  *
  * Returns NULL if both fences are signaled, otherwise the fence that would be
  * signaled last. Both fences must be from the same context, since a seqno is
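For example, a caller tracking the newest fence per context might use the
comparison like this (hypothetical "slot" cache, not part of this patch;
note that dma_fence_put() accepts NULL):

	if (!slot || dma_fence_is_later(new_fence, slot)) {
		dma_fence_put(slot);
		slot = dma_fence_get(new_fence);
	}
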
@@ -398,7 +470,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,

 /**
  * dma_fence_get_status_locked - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
  *
  * Drivers can supply an optional error status condition before they signal
  * the fence (to indicate whether the fence was completed due to an error
@@ -422,8 +494,8 @@ int dma_fence_get_status(struct dma_fence *fence);

 /**
  * dma_fence_set_error - flag an error condition on the fence
- * @fence: [in] the dma_fence
- * @error: [in] the error to store
+ * @fence: the dma_fence
+ * @error: the error to store
  *
  * Drivers can supply an optional error status condition before they signal
  * the fence, to indicate that the fence was completed due to an error
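A sketch of the intended pattern (assumed driver code, not part of this
patch): the error must be flagged before the fence is signaled, and
consumers read it back through dma_fence_get_status().

	if (job_failed)				/* hypothetical condition */
		dma_fence_set_error(fence, -EIO);
	dma_fence_signal(fence);

	/* later, in a consumer: <0 = error, 0 = pending, 1 = signaled */
	int status = dma_fence_get_status(fence);
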
@@ -449,8 +521,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,

 /**
  * dma_fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
  *
  * This function will return -ERESTARTSYS if interrupted by a signal,
  * or 0 if the fence was signaled. Other error values may be
@@ -459,6 +531,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
  * Performs a synchronous wait on this fence. It is assumed the caller
  * directly or indirectly holds a reference to the fence, otherwise the
  * fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
  */
 static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 {
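For a bounded wait, the timeout variant referenced above can be used along
these lines (assumed caller code, not part of this patch):

	signed long ret;

	ret = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* error from the wait path */
	if (ret == 0)
		return -ETIMEDOUT;	/* 100 ms elapsed without signaling */
	/* otherwise signaled; ret is the number of jiffies left over */
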
@ -680,6 +680,13 @@ struct drm_get_cap {
|
|||
*/
|
||||
#define DRM_CLIENT_CAP_ATOMIC 3
|
||||
|
||||
/**
|
||||
* DRM_CLIENT_CAP_ASPECT_RATIO
|
||||
*
|
||||
* If set to 1, the DRM core will provide aspect ratio information in modes.
|
||||
*/
|
||||
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
|
||||
|
||||
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
|
||||
struct drm_set_client_cap {
|
||||
__u64 capability;
|
||||
|
|
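A userspace client would opt in roughly as follows (assumed usage, not part
of this patch; libdrm's drmSetClientCap() wraps the same raw ioctl):

	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_ASPECT_RATIO,
		.value = 1,
	};

	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) != 0)
		fprintf(stderr, "aspect ratio info not supported\n");
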
@@ -93,6 +93,8 @@ extern "C" {
 #define DRM_MODE_PICTURE_ASPECT_NONE	0
 #define DRM_MODE_PICTURE_ASPECT_4_3	1
 #define DRM_MODE_PICTURE_ASPECT_16_9	2
+#define DRM_MODE_PICTURE_ASPECT_64_27	3
+#define DRM_MODE_PICTURE_ASPECT_256_135	4

 /* Aspect ratio flag bitmask (4 bits 22:19) */
 #define DRM_MODE_FLAG_PIC_AR_MASK	(0x0F<<19)
@@ -102,6 +104,10 @@ extern "C" {
 	(DRM_MODE_PICTURE_ASPECT_4_3<<19)
 #define DRM_MODE_FLAG_PIC_AR_16_9 \
 	(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_PIC_AR_64_27 \
+	(DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define DRM_MODE_FLAG_PIC_AR_256_135 \
+	(DRM_MODE_PICTURE_ASPECT_256_135<<19)

 #define DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC | \
 				 DRM_MODE_FLAG_NHSYNC | \
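Once the cap is set, a client can decode the aspect ratio from a mode's
flags field, for example (assumed usage with a hypothetical "mode" pointer,
not part of this patch):

	switch (mode->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
	case DRM_MODE_FLAG_PIC_AR_64_27:
		/* 64:27, the "21:9" cinematic ratio */
		break;
	case DRM_MODE_FLAG_PIC_AR_256_135:
		/* 256:135 */
		break;
	default:
		/* 4:3, 16:9, or no aspect ratio information */
		break;
	}
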
@@ -0,0 +1,194 @@
/*
 * Copyright © 2014-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _V3D_DRM_H_
#define _V3D_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_V3D_SUBMIT_CL	0x00
#define DRM_V3D_WAIT_BO		0x01
#define DRM_V3D_CREATE_BO	0x02
#define DRM_V3D_MMAP_BO		0x03
#define DRM_V3D_GET_PARAM	0x04
#define DRM_V3D_GET_BO_OFFSET	0x05

#define DRM_IOCTL_V3D_SUBMIT_CL		DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
#define DRM_IOCTL_V3D_CREATE_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
#define DRM_IOCTL_V3D_MMAP_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)

/**
 * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * This asks the kernel to have the GPU execute an optional binner
 * command list, and a render command list.
 */
struct drm_v3d_submit_cl {
	/* Pointer to the binner command list.
	 *
	 * This is the first set of commands executed, which runs the
	 * coordinate shader to determine where primitives land on the screen,
	 * then writes out the state updates and draw calls necessary per tile
	 * to the tile allocation BO.
	 */
	__u32 bcl_start;

	/** End address of the BCL (first byte after the BCL) */
	__u32 bcl_end;

	/* Offset of the render command list.
	 *
	 * This is the second set of commands executed, which will either
	 * execute the tiles that have been set up by the BCL, or a fixed set
	 * of tiles (in the case of RCL-only blits).
	 */
	__u32 rcl_start;

	/** End address of the RCL (first byte after the RCL) */
	__u32 rcl_end;

	/** An optional sync object to wait on before starting the BCL. */
	__u32 in_sync_bcl;
	/** An optional sync object to wait on before starting the RCL. */
	__u32 in_sync_rcl;
	/** An optional sync object to place the completion fence in. */
	__u32 out_sync;

	/* Offset of the tile alloc memory
	 *
	 * This is optional on V3D 3.3 (where the CL can set the value) but
	 * required on V3D 4.1.
	 */
	__u32 qma;

	/** Size of the tile alloc memory. */
	__u32 qms;

	/** Offset of the tile state data array. */
	__u32 qts;

	/* Pointer to a u32 array of the BOs that are referenced by the job.
	 */
	__u64 bo_handles;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;

	/* Pad, must be zero-filled. */
	__u32 pad;
};

/**
 * struct drm_v3d_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_V3D_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_v3d_wait_bo {
	__u32 handle;
	__u32 pad;
	__u64 timeout_ns;
};

/**
 * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_v3d_create_bo {
	__u32 size;
	__u32 flags;
	/** Returned GEM handle for the BO. */
	__u32 handle;
	/**
	 * Returned offset for the BO in the V3D address space. This offset
	 * is private to the DRM fd and is valid for the lifetime of the GEM
	 * handle.
	 *
	 * This offset value will always be nonzero, since various HW
	 * units treat 0 specially.
	 */
	__u32 offset;
};

/**
 * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
 *
 * This doesn't actually perform an mmap. Instead, it returns the
 * offset you need to use in an mmap on the DRM device node. This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_v3d_mmap_bo {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 flags;
	/** Offset into the drm node to use for subsequent mmap call. */
	__u64 offset;
};

enum drm_v3d_param {
	DRM_V3D_PARAM_V3D_UIFCFG,
	DRM_V3D_PARAM_V3D_HUB_IDENT1,
	DRM_V3D_PARAM_V3D_HUB_IDENT2,
	DRM_V3D_PARAM_V3D_HUB_IDENT3,
	DRM_V3D_PARAM_V3D_CORE0_IDENT0,
	DRM_V3D_PARAM_V3D_CORE0_IDENT1,
	DRM_V3D_PARAM_V3D_CORE0_IDENT2,
};

struct drm_v3d_get_param {
	__u32 param;
	__u32 pad;
	__u64 value;
};

/**
 * Returns the offset for the BO in the V3D address space for this DRM fd.
 * This is the same value returned by drm_v3d_create_bo, if that was called
 * from this DRM fd.
 */
struct drm_v3d_get_bo_offset {
	__u32 handle;
	__u32 offset;
};

#if defined(__cplusplus)
}
#endif

#endif /* _V3D_DRM_H_ */
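A sketch of the expected userspace flow for the new BO ioctls (error
handling trimmed; assumed usage, not part of this patch):

	struct drm_v3d_create_bo create = { .size = 4096 };
	struct drm_v3d_mmap_bo map = { 0 };
	void *ptr;

	ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
	/* create.handle is the GEM handle; create.offset the V3D address */

	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
	/* the ioctl only computes a fake offset; the mapping itself is a
	 * plain mmap against the DRM fd */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
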
@@ -183,10 +183,17 @@ struct drm_vc4_submit_cl {
 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
 	__u32 perfmonid;

-	/* Unused field to align this struct on 64 bits. Must be set to 0.
-	 * If one ever needs to add an u32 field to this struct, this field
-	 * can be used.
+	/* Syncobj handle to wait on. If set, processing of this render job
+	 * will not start until the syncobj is signaled. 0 means ignore.
 	 */
+	__u32 in_sync;
+
+	/* Syncobj handle to export fence to. If set, the fence in the syncobj
+	 * will be replaced with a fence that signals upon completion of this
+	 * render job. 0 means ignore.
+	 */
+	__u32 out_sync;
+
 	__u32 pad2;
 };

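A sketch of how a client might use the new fields (syncobj handles would
come from DRM_IOCTL_SYNCOBJ_CREATE; the rest of the submit setup is elided,
and this is assumed usage, not part of this patch):

	struct drm_vc4_submit_cl submit = { 0 };

	submit.in_sync = wait_syncobj;	/* 0 would mean "don't wait" */
	submit.out_sync = done_syncobj;	/* 0 would mean "no fence export" */
	submit.pad2 = 0;		/* padding must still be zero */
	ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
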
@@ -260,6 +260,7 @@ struct virtio_gpu_cmd_submit {
 };

 #define VIRTIO_GPU_CAPSET_VIRGL 1
+#define VIRTIO_GPU_CAPSET_VIRGL2 2

 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
 struct virtio_gpu_get_capset_info {