mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: add core driver (v4)
This adds the non-asic specific core driver code.

v2: remove extra kconfig option
v3: implement minor fixes from Fengguang Wu
v4: fix cast in amdgpu_ucode.c

Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 97b2e202fb
commit d38ceaf99e
drivers/gpu/drm/Kconfig:
@@ -120,6 +120,27 @@ config DRM_RADEON

source "drivers/gpu/drm/radeon/Kconfig"

config DRM_AMDGPU
	tristate "AMD GPU"
	depends on DRM && PCI
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	select FW_LOADER
	select DRM_KMS_HELPER
	select DRM_KMS_FB_HELPER
	select DRM_TTM
	select POWER_SUPPLY
	select HWMON
	select BACKLIGHT_CLASS_DEVICE
	select INTERVAL_TREE
	help
	  Choose this option if you have a recent AMD Radeon graphics card.

	  If M is selected, the module will be called amdgpu.

source "drivers/gpu/drm/amd/amdgpu/Kconfig"

source "drivers/gpu/drm/nouveau/Kconfig"

config DRM_I810
drivers/gpu/drm/Makefile:
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_TDFX) += tdfx/

obj-$(CONFIG_DRM_R128)	+= r128/
obj-$(CONFIG_HSA_AMD)	+= amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
obj-$(CONFIG_DRM_MGA)	+= mga/
obj-$(CONFIG_DRM_I810)	+= i810/
obj-$(CONFIG_DRM_I915)	+= i915/
drivers/gpu/drm/amd/amdgpu/Kconfig (new file):
@@ -0,0 +1,17 @@

config DRM_AMDGPU_CIK
	bool "Enable amdgpu support for CIK parts"
	depends on DRM_AMDGPU
	help
	  Choose this option if you want to enable experimental support
	  for CIK asics.

	  CIK is already supported in radeon. CIK support in amdgpu
	  is for experimentation and testing.

config DRM_AMDGPU_USERPTR
	bool "Always enable userptr write support"
	depends on DRM_AMDGPU
	select MMU_NOTIFIER
	help
	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
	  selected to enable full userptr support.
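Both options reduce to preprocessor symbols at build time. A minimal sketch of how such a switch typically surfaces in the C sources (the helper below is hypothetical, not taken from this commit):

#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical helper: CIK code paths only exist in the object code
 * when CONFIG_DRM_AMDGPU_CIK was selected at configuration time. */
static int example_cik_supported(void)
{
#ifdef CONFIG_DRM_AMDGPU_CIK
	pr_info("amdgpu: experimental CIK support built in\n");
	return 0;
#else
	return -ENODEV;	/* CIK parts stay with the radeon driver */
#endif
}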
drivers/gpu/drm/amd/amdgpu/Makefile (new file):
@@ -0,0 +1,49 @@

#
# Makefile for the drm device driver.  This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg

amdgpu-y := amdgpu_drv.o

# add KMS driver
amdgpu-y += amdgpu_device.o amdgpu_kms.o \
	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
	atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o

# add IH block
amdgpu-y += \
	amdgpu_irq.o \
	amdgpu_ih.o

# add SMC block
amdgpu-y += \
	amdgpu_dpm.o

# add GFX block
amdgpu-y += \
	amdgpu_gfx.o

# add UVD block
amdgpu-y += \
	amdgpu_uvd.o

# add VCE block
amdgpu-y += \
	amdgpu_vce.o

amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o

obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o

CFLAGS_amdgpu_trace_points.o := -I$(src)
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c (new file):
@@ -0,0 +1,768 @@

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_acpi.h"
#include "atom.h"

#define ACPI_AC_CLASS	"ac_adapter"

extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);

struct atif_verify_interface {
	u16 size;		/* structure size in bytes (includes size field) */
	u16 version;		/* version */
	u32 notification_mask;	/* supported notifications mask */
	u32 function_bits;	/* supported functions bit vector */
} __packed;

struct atif_system_params {
	u16 size;		/* structure size in bytes (includes size field) */
	u32 valid_mask;		/* valid flags mask */
	u32 flags;		/* flags */
	u8 command_code;	/* notify command code */
} __packed;

struct atif_sbios_requests {
	u16 size;		/* structure size in bytes (includes size field) */
	u32 pending;		/* pending sbios requests */
	u8 panel_exp_mode;	/* panel expansion mode */
	u8 thermal_gfx;		/* thermal state: target gfx controller */
	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
	u8 forced_power_gfx;	/* forced power state: target gfx controller */
	u8 forced_power_state;	/* forced power state: state id */
	u8 system_power_src;	/* system power source */
	u8 backlight_level;	/* panel backlight level (0-255) */
} __packed;

#define ATIF_NOTIFY_MASK	0x3
#define ATIF_NOTIFY_NONE	0
#define ATIF_NOTIFY_81		1
#define ATIF_NOTIFY_N		2

struct atcs_verify_interface {
	u16 size;		/* structure size in bytes (includes size field) */
	u16 version;		/* version */
	u32 function_bits;	/* supported functions bit vector */
} __packed;

#define ATCS_VALID_FLAGS_MASK	0x3

struct atcs_pref_req_input {
	u16 size;		/* structure size in bytes (includes size field) */
	u16 client_id;		/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
	u16 valid_flags_mask;	/* valid flags mask */
	u16 flags;		/* flags */
	u8 req_type;		/* request type */
	u8 perf_req;		/* performance request */
} __packed;

struct atcs_pref_req_output {
	u16 size;		/* structure size in bytes (includes size field) */
	u8 ret_val;		/* return value */
} __packed;

/* Call the ATIF method
 */
/**
 * amdgpu_atif_call - call an ATIF method
 *
 * @handle: acpi handle
 * @function: the ATIF function to execute
 * @params: ATIF function params
 *
 * Executes the requested ATIF function (all asics).
 * Returns a pointer to the acpi output buffer.
 */
static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
					   struct acpi_buffer *params)
{
	acpi_status status;
	union acpi_object atif_arg_elements[2];
	struct acpi_object_list atif_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atif_arg.count = 2;
	atif_arg.pointer = &atif_arg_elements[0];

	atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atif_arg_elements[0].integer.value = function;

	if (params) {
		atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
		atif_arg_elements[1].buffer.length = params->length;
		atif_arg_elements[1].buffer.pointer = params->pointer;
	} else {
		/* We need a second fake parameter */
		atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
		atif_arg_elements[1].integer.value = 0;
	}

	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);

	/* Fail only if calling the method fails and ATIF is supported */
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
				 acpi_format_exception(status));
		kfree(buffer.pointer);
		return NULL;
	}

	return buffer.pointer;
}

/**
 * amdgpu_atif_parse_notification - parse supported notifications
 *
 * @n: supported notifications struct
 * @mask: supported notifications mask from ATIF
 *
 * Use the supported notifications mask from ATIF function
 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
 * are supported (all asics).
 */
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
}

/**
 * amdgpu_atif_parse_functions - parse supported functions
 *
 * @f: supported functions struct
 * @mask: supported functions mask from ATIF
 *
 * Use the supported functions mask from ATIF function
 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
 * are supported (all asics).
 */
static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mask)
{
	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
}

/**
 * amdgpu_atif_verify_interface - verify ATIF
 *
 * @handle: acpi handle
 * @atif: amdgpu atif struct
 *
 * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
 * to initialize ATIF and determine what features are supported
 * (all asics).
 * returns 0 on success, error on failure.
 */
static int amdgpu_atif_verify_interface(acpi_handle handle,
					struct amdgpu_atif *atif)
{
	union acpi_object *info;
	struct atif_verify_interface output;
	size_t size;
	int err = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
	if (!info)
		return -EIO;

	memset(&output, 0, sizeof(output));

	size = *(u16 *) info->buffer.pointer;
	if (size < 12) {
		DRM_INFO("ATIF buffer is too small: %zu\n", size);
		err = -EINVAL;
		goto out;
	}
	size = min(sizeof(output), size);

	memcpy(&output, info->buffer.pointer, size);

	/* TODO: check version? */
	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);

	amdgpu_atif_parse_notification(&atif->notifications, output.notification_mask);
	amdgpu_atif_parse_functions(&atif->functions, output.function_bits);

out:
	kfree(info);
	return err;
}

/**
 * amdgpu_atif_get_notification_params - determine notify configuration
 *
 * @handle: acpi handle
 * @n: atif notification configuration struct
 *
 * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
 * to determine if a notifier is used and if so which one
 * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
 * where n is specified in the result if a notifier is used.
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atif_get_notification_params(acpi_handle handle,
					       struct amdgpu_atif_notification_cfg *n)
{
	union acpi_object *info;
	struct atif_system_params params;
	size_t size;
	int err = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
	if (!info) {
		err = -EIO;
		goto out;
	}

	size = *(u16 *) info->buffer.pointer;
	if (size < 10) {
		err = -EINVAL;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	size = min(sizeof(params), size);
	memcpy(&params, info->buffer.pointer, size);

	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
			 params.flags, params.valid_mask);
	params.flags = params.flags & params.valid_mask;

	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
		n->enabled = false;
		n->command_code = 0;
	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
		n->enabled = true;
		n->command_code = 0x81;
	} else {
		if (size < 11) {
			err = -EINVAL;
			goto out;
		}
		n->enabled = true;
		n->command_code = params.command_code;
	}

out:
	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
			 (n->enabled ? "enabled" : "disabled"),
			 n->command_code);
	kfree(info);
	return err;
}

/**
 * amdgpu_atif_get_sbios_requests - get requested sbios event
 *
 * @handle: acpi handle
 * @req: atif sbios request struct
 *
 * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
 * to determine what requests the sbios is making to the driver
 * (all asics).
 * Returns the number of pending SBIOS requests, or an error.
 */
static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
					  struct atif_sbios_requests *req)
{
	union acpi_object *info;
	size_t size;
	int count = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
	if (!info)
		return -EIO;

	size = *(u16 *)info->buffer.pointer;
	if (size < 0xd) {
		count = -EINVAL;
		goto out;
	}
	memset(req, 0, sizeof(*req));

	size = min(sizeof(*req), size);
	memcpy(req, info->buffer.pointer, size);
	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);

	count = hweight32(req->pending);

out:
	kfree(info);
	return count;
}
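
/* For illustration (not from this commit): hweight32() returns the number
 * of set bits in its argument, so the count computed above is the number
 * of distinct pending SBIOS requests, e.g.
 *
 *	u32 pending = ATIF_DISPLAY_SWITCH_REQUEST |
 *		      ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST;
 *	int count = hweight32(pending);		(count == 2)
 */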

/**
 * amdgpu_atif_handler - handle ATIF notify requests
 *
 * @adev: amdgpu_device pointer
 * @event: atif sbios request struct
 *
 * Checks the acpi event and if it matches an atif event,
 * handles it.
 * Returns NOTIFY code
 */
int amdgpu_atif_handler(struct amdgpu_device *adev,
			struct acpi_bus_event *event)
{
	struct amdgpu_atif *atif = &adev->atif;
	struct atif_sbios_requests req;
	acpi_handle handle;
	int count;

	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
			 event->device_class, event->type);

	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
		return NOTIFY_DONE;

	if (!atif->notification_cfg.enabled ||
	    event->type != atif->notification_cfg.command_code)
		/* Not our event */
		return NOTIFY_DONE;

	/* Check pending SBIOS requests */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	count = amdgpu_atif_get_sbios_requests(handle, &req);

	if (count <= 0)
		return NOTIFY_DONE;

	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);

	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
		struct amdgpu_encoder *enc = atif->encoder_for_bl;

		if (enc) {
			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;

			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
					 req.backlight_level);

			amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
			backlight_force_update(dig->bl_dev,
					       BACKLIGHT_UPDATE_HOTKEY);
#endif
		}
	}
	/* TODO: check other events */

	/* We've handled the event, stop the notifier chain. The ACPI interface
	 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
	 * userspace if the event was generated only to signal a SBIOS
	 * request.
	 */
	return NOTIFY_BAD;
}

/* Call the ATCS method
 */
/**
 * amdgpu_atcs_call - call an ATCS method
 *
 * @handle: acpi handle
 * @function: the ATCS function to execute
 * @params: ATCS function params
 *
 * Executes the requested ATCS function (all asics).
 * Returns a pointer to the acpi output buffer.
 */
static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
					   struct acpi_buffer *params)
{
	acpi_status status;
	union acpi_object atcs_arg_elements[2];
	struct acpi_object_list atcs_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atcs_arg.count = 2;
	atcs_arg.pointer = &atcs_arg_elements[0];

	atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atcs_arg_elements[0].integer.value = function;

	if (params) {
		atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
		atcs_arg_elements[1].buffer.length = params->length;
		atcs_arg_elements[1].buffer.pointer = params->pointer;
	} else {
		/* We need a second fake parameter */
		atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
		atcs_arg_elements[1].integer.value = 0;
	}

	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);

	/* Fail only if calling the method fails and ATCS is supported */
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
				 acpi_format_exception(status));
		kfree(buffer.pointer);
		return NULL;
	}

	return buffer.pointer;
}

/**
 * amdgpu_atcs_parse_functions - parse supported functions
 *
 * @f: supported functions struct
 * @mask: supported functions mask from ATCS
 *
 * Use the supported functions mask from ATCS function
 * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
 * are supported (all asics).
 */
static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mask)
{
	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
}

/**
 * amdgpu_atcs_verify_interface - verify ATCS
 *
 * @handle: acpi handle
 * @atcs: amdgpu atcs struct
 *
 * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
 * to initialize ATCS and determine what features are supported
 * (all asics).
 * returns 0 on success, error on failure.
 */
static int amdgpu_atcs_verify_interface(acpi_handle handle,
					struct amdgpu_atcs *atcs)
{
	union acpi_object *info;
	struct atcs_verify_interface output;
	size_t size;
	int err = 0;

	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
	if (!info)
		return -EIO;

	memset(&output, 0, sizeof(output));

	size = *(u16 *) info->buffer.pointer;
	if (size < 8) {
		DRM_INFO("ATCS buffer is too small: %zu\n", size);
		err = -EINVAL;
		goto out;
	}
	size = min(sizeof(output), size);

	memcpy(&output, info->buffer.pointer, size);

	/* TODO: check version? */
	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);

	amdgpu_atcs_parse_functions(&atcs->functions, output.function_bits);

out:
	kfree(info);
	return err;
}

/**
 * amdgpu_acpi_is_pcie_performance_request_supported
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
 * are supported (all asics).
 * returns true if supported, false if not.
 */
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
{
	struct amdgpu_atcs *atcs = &adev->atcs;

	if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
		return true;

	return false;
}

/**
 * amdgpu_acpi_pcie_notify_device_ready
 *
 * @adev: amdgpu_device pointer
 *
 * Executes the PCIE_DEVICE_READY_NOTIFICATION method
 * (all asics).
 * returns 0 on success, error on failure.
 */
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
{
	acpi_handle handle;
	union acpi_object *info;
	struct amdgpu_atcs *atcs = &adev->atcs;

	/* Get the device handle */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -EINVAL;

	if (!atcs->functions.pcie_dev_rdy)
		return -EINVAL;

	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
	if (!info)
		return -EIO;

	kfree(info);

	return 0;
}

/**
 * amdgpu_acpi_pcie_performance_request
 *
 * @adev: amdgpu_device pointer
 * @perf_req: requested perf level (pcie gen speed)
 * @advertise: set advertise caps flag if set
 *
 * Executes the PCIE_PERFORMANCE_REQUEST method to
 * change the pcie gen speed (all asics).
 * returns 0 on success, error on failure.
 */
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise)
{
	acpi_handle handle;
	union acpi_object *info;
	struct amdgpu_atcs *atcs = &adev->atcs;
	struct atcs_pref_req_input atcs_input;
	struct atcs_pref_req_output atcs_output;
	struct acpi_buffer params;
	size_t size;
	u32 retry = 3;

	/* Get the device handle */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -EINVAL;

	if (!atcs->functions.pcie_perf_req)
		return -EINVAL;

	atcs_input.size = sizeof(struct atcs_pref_req_input);
	/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
	atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
	atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
	atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
	if (advertise)
		atcs_input.flags |= ATCS_ADVERTISE_CAPS;
	atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
	atcs_input.perf_req = perf_req;

	params.length = sizeof(struct atcs_pref_req_input);
	params.pointer = &atcs_input;

	while (retry--) {
		info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
		if (!info)
			return -EIO;

		memset(&atcs_output, 0, sizeof(atcs_output));

		size = *(u16 *) info->buffer.pointer;
		if (size < 3) {
			DRM_INFO("ATCS buffer is too small: %zu\n", size);
			kfree(info);
			return -EINVAL;
		}
		size = min(sizeof(atcs_output), size);

		memcpy(&atcs_output, info->buffer.pointer, size);

		kfree(info);

		switch (atcs_output.ret_val) {
		case ATCS_REQUEST_REFUSED:
		default:
			return -EINVAL;
		case ATCS_REQUEST_COMPLETE:
			return 0;
		case ATCS_REQUEST_IN_PROGRESS:
			udelay(10);
			break;
		}
	}

	return 0;
}
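
/* For illustration (not from this commit): the client_id packed above uses
 * the standard PCI bus/device/function layout, so it decodes with the usual
 * helpers from <linux/pci.h>:
 *
 *	u8 bus  = client_id >> 8;
 *	u8 dev  = PCI_SLOT(client_id & 0xff);
 *	u8 func = PCI_FUNC(client_id & 0xff);
 */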

/**
 * amdgpu_acpi_event - handle notify events
 *
 * @nb: notifier block
 * @val: val
 * @data: acpi event
 *
 * Calls relevant amdgpu functions in response to various
 * acpi events.
 * Returns NOTIFY code
 */
static int amdgpu_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_DEBUG_DRIVER("pm: AC\n");
		else
			DRM_DEBUG_DRIVER("pm: DC\n");

		amdgpu_pm_acpi_event_handler(adev);
	}

	/* Check for pending SBIOS requests */
	return amdgpu_atif_handler(adev, entry);
}

/* Call all ACPI methods here */
/**
 * amdgpu_acpi_init - init driver acpi support
 *
 * @adev: amdgpu_device pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the acpi
 * notifier chain (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
	acpi_handle handle;
	struct amdgpu_atif *atif = &adev->atif;
	struct amdgpu_atcs *atcs = &adev->atcs;
	int ret;

	/* Get the device handle */
	handle = ACPI_HANDLE(&adev->pdev->dev);

	if (!adev->bios || !handle)
		return 0;

	/* Call the ATCS method */
	ret = amdgpu_atcs_verify_interface(handle, atcs);
	if (ret) {
		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
	}

	/* Call the ATIF method */
	ret = amdgpu_atif_verify_interface(handle, atif);
	if (ret) {
		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
		goto out;
	}

	if (atif->notifications.brightness_change) {
		struct drm_encoder *tmp;

		/* Find the encoder controlling the brightness */
		list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
				    head) {
			struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);

			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
			    enc->enc_priv) {
				if (adev->is_atom_bios) {
					struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
					if (dig->bl_dev) {
						atif->encoder_for_bl = enc;
						break;
					}
				}
			}
		}
	}

	if (atif->functions.sbios_requests && !atif->functions.system_params) {
		/* XXX check this workaround, if sbios request function is
		 * present we have to see how it's configured in the system
		 * params
		 */
		atif->functions.system_params = true;
	}

	if (atif->functions.system_params) {
		ret = amdgpu_atif_get_notification_params(handle,
							  &atif->notification_cfg);
		if (ret) {
			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
					 ret);
			/* Disable notification */
			atif->notification_cfg.enabled = false;
		}
	}

out:
	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
	register_acpi_notifier(&adev->acpi_nb);

	return ret;
}

/**
 * amdgpu_acpi_fini - tear down driver acpi support
 *
 * @adev: amdgpu_device pointer
 *
 * Unregisters with the acpi notifier chain (all asics).
 */
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
	unregister_acpi_notifier(&adev->acpi_nb);
}
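amdgpu_acpi_init() and amdgpu_acpi_fini() are meant to bracket the device's lifetime. A minimal sketch of the expected call pattern, assuming a simplified probe/teardown path (the surrounding function names are illustrative, not from this commit):

/* Illustrative bring-up/teardown ordering. */
static int example_device_bringup(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_acpi_init(adev);	/* verify ATIF/ATCS, register notifier */
	if (r)
		dev_dbg(&adev->pdev->dev, "acpi init failed: %d\n", r);
	/* failure is assumed non-fatal: the GPU works without the ACPI extras */
	return 0;
}

static void example_device_teardown(struct amdgpu_device *adev)
{
	amdgpu_acpi_fini(adev);		/* unregister the notifier */
}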
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h (new file):
@@ -0,0 +1,445 @@

/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef AMDGPU_ACPI_H
#define AMDGPU_ACPI_H

struct amdgpu_device;
struct acpi_bus_event;

int amdgpu_atif_handler(struct amdgpu_device *adev,
			struct acpi_bus_event *event);

/* AMD hw uses four ACPI control methods:
 * 1. ATIF
 * ARG0: (ACPI_INTEGER) function code
 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
 * ATIF provides an entry point for the gfx driver to interact with the sbios.
 * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
 * notification. Which notification is used is indicated by the ATIF Control
 * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
 * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
 * to identify pending System BIOS requests and associated parameters. For
 * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
 * will perform display device detection and invoke ATIF Control Method
 * SELECT_ACTIVE_DISPLAYS.
 *
 * 2. ATPX
 * ARG0: (ACPI_INTEGER) function code
 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
 * ATPX methods are used on PowerXpress systems to handle mux switching and
 * discrete GPU power control.
 *
 * 3. ATRM
 * ARG0: (ACPI_INTEGER) offset of vbios rom data
 * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
 * OUTPUT: (ACPI_BUFFER) output buffer
 * ATRM provides an interface to access the discrete GPU vbios image on
 * PowerXpress systems with multiple GPUs.
 *
 * 4. ATCS
 * ARG0: (ACPI_INTEGER) function code
 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
 * ATCS provides an interface to AMD chipset specific functionality.
 *
 */
/* ATIF */
#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - version
 * DWORD - supported notifications mask
 * DWORD - supported functions bit vector
 */
/* Notifications mask */
#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
/* supported functions vector */
#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
#       define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * DWORD - valid flags mask
 * DWORD - flags
 *
 * OR
 *
 * WORD  - structure size in bytes (includes size field)
 * DWORD - valid flags mask
 * DWORD - flags
 * BYTE  - notify command code
 *
 * flags
 * bits 1:0:
 * 0 - Notify(VGA, 0x81) is not used for notification
 * 1 - Notify(VGA, 0x81) is used for notification
 * 2 - Notify(VGA, n) is used for notification where
 * n (0xd0-0xd9) is specified in notify command code.
 * bit 2:
 * 1 - lid changes not reported through int10
 */
#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * DWORD - pending sbios requests
 * BYTE  - panel expansion mode
 * BYTE  - thermal state: target gfx controller
 * BYTE  - thermal state: state id (0: exit state, non-0: state)
 * BYTE  - forced power state: target gfx controller
 * BYTE  - forced power state: state id
 * BYTE  - system power source
 * BYTE  - panel backlight level (0-255)
 */
/* pending sbios requests */
#       define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
#       define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
#       define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
#       define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
/* panel expansion mode */
#       define ATIF_PANEL_EXPANSION_DISABLE 0
#       define ATIF_PANEL_EXPANSION_FULL 1
#       define ATIF_PANEL_EXPANSION_ASPECT 2
/* target gfx controller */
#       define ATIF_TARGET_GFX_SINGLE 0
#       define ATIF_TARGET_GFX_PX_IGPU 1
#       define ATIF_TARGET_GFX_PX_DGPU 2
/* system power source */
#       define ATIF_POWER_SOURCE_AC 1
#       define ATIF_POWER_SOURCE_DC 2
#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - selected displays
 * WORD  - connected displays
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - selected displays
 */
#       define ATIF_LCD1 (1 << 0)
#       define ATIF_CRT1 (1 << 1)
#       define ATIF_TV (1 << 2)
#       define ATIF_DFP1 (1 << 3)
#       define ATIF_CRT2 (1 << 4)
#       define ATIF_LCD2 (1 << 5)
#       define ATIF_DFP2 (1 << 7)
#       define ATIF_CV (1 << 8)
#       define ATIF_DFP3 (1 << 9)
#       define ATIF_DFP4 (1 << 10)
#       define ATIF_DFP5 (1 << 11)
#       define ATIF_DFP6 (1 << 12)
#define ATIF_FUNCTION_GET_LID_STATE 0x4
/* ARG0: ATIF_FUNCTION_GET_LID_STATE
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - lid state (0: open, 1: closed)
 *
 * GET_LID_STATE only works at boot and resume; for general lid
 * status, use the kernel-provided status
 */
#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - 0
 * BYTE  - TV standard
 */
#       define ATIF_TV_STD_NTSC 0
#       define ATIF_TV_STD_PAL 1
#       define ATIF_TV_STD_PALM 2
#       define ATIF_TV_STD_PAL60 3
#       define ATIF_TV_STD_NTSCJ 4
#       define ATIF_TV_STD_PALCN 5
#       define ATIF_TV_STD_PALN 6
#       define ATIF_TV_STD_SCART_RGB 9
#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - 0
 * BYTE  - TV standard
 * OUTPUT: none
 */
#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - panel expansion mode
 */
#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - panel expansion mode
 * OUTPUT: none
 */
#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - gfx controller id
 * BYTE  - current temperature (degrees Celsius)
 * OUTPUT: none
 */
#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
 * ARG1: none
 * OUTPUT:
 * WORD  - number of gfx devices
 * WORD  - device structure size in bytes (excludes device size field)
 * DWORD - flags         \
 * WORD  - bus number     } repeated structure
 * WORD  - device number /
 */
/* flags */
#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
#       define ATIF_XGP_PORT (1 << 1)
#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
#       define ATIF_XGP_PORT_IN_DOCK (1 << 3)

/* ATPX */
#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - version
 * DWORD - supported functions bit vector
 */
/* supported functions vector */
#       define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
#       define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
#       define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * DWORD - valid flags mask
 * DWORD - flags
 */
/* flags */
#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
#       define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
#       define ATPX_TV_SIGNAL_MUXED (1 << 4)
#       define ATPX_DFP_SIGNAL_MUXED (1 << 5)
#       define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
#       define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
#       define ATPX_ACF_NOT_SUPPORTED (1 << 8)
#       define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
#define ATPX_FUNCTION_POWER_CONTROL 0x2
/* ARG0: ATPX_FUNCTION_POWER_CONTROL
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - dGPU power state (0: power off, 1: power on)
 * OUTPUT: none
 */
#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - display mux control (0: iGPU, 1: dGPU)
 * OUTPUT: none
 */
#       define ATPX_INTEGRATED_GPU 0
#       define ATPX_DISCRETE_GPU 1
#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
 * OUTPUT: none
 */
#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - target gpu (0: iGPU, 1: dGPU)
 * OUTPUT: none
 */
#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - target gpu (0: iGPU, 1: dGPU)
 * OUTPUT: none
 */
#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
 * ARG1: none
 * OUTPUT:
 * WORD  - number of display connectors
 * WORD  - connector structure size in bytes (excludes connector size field)
 * BYTE  - flags                                                     \
 * BYTE  - ATIF display vector bit position                           } repeated
 * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
 * WORD  - connector ACPI id                                         /
 */
/* flags */
#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
 * ARG1: none
 * OUTPUT:
 * WORD  - number of HPD/DDC ports
 * WORD  - port structure size in bytes (excludes port size field)
 * BYTE  - ATIF display vector bit position \
 * BYTE  - hpd id                            } repeated structure
 * BYTE  - ddc id                           /
 *
 * available on A+A systems only
 */
/* hpd id */
#       define ATPX_HPD_NONE 0
#       define ATPX_HPD1 1
#       define ATPX_HPD2 2
#       define ATPX_HPD3 3
#       define ATPX_HPD4 4
#       define ATPX_HPD5 5
#       define ATPX_HPD6 6
/* ddc id */
#       define ATPX_DDC_NONE 0
#       define ATPX_DDC1 1
#       define ATPX_DDC2 2
#       define ATPX_DDC3 3
#       define ATPX_DDC4 4
#       define ATPX_DDC5 5
#       define ATPX_DDC6 6
#       define ATPX_DDC7 7
#       define ATPX_DDC8 8

/* ATCS */
#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - version
 * DWORD - supported functions bit vector
 */
/* supported functions vector */
#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
 * ARG1: none
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * DWORD - valid flags mask
 * DWORD - flags (0: undocked, 1: docked)
 */
/* flags */
#       define ATCS_DOCKED (1 << 0)
#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
 * WORD  - valid flags mask
 * WORD  - flags
 * BYTE  - request type
 * BYTE  - performance request
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - return value
 */
/* flags */
#       define ATCS_ADVERTISE_CAPS (1 << 0)
#       define ATCS_WAIT_FOR_COMPLETION (1 << 1)
/* request type */
#       define ATCS_PCIE_LINK_SPEED 1
/* performance request */
#       define ATCS_REMOVE 0
#       define ATCS_FORCE_LOW_POWER 1
#       define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
#       define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
#       define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
/* return value */
#       define ATCS_REQUEST_REFUSED 1
#       define ATCS_REQUEST_COMPLETE 2
#       define ATCS_REQUEST_IN_PROGRESS 3
#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
 * ARG1: none
 * OUTPUT: none
 */
#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
 * ARG1:
 * WORD  - structure size in bytes (includes size field)
 * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
 * BYTE  - number of active lanes
 * OUTPUT:
 * WORD  - structure size in bytes (includes size field)
 * BYTE  - number of active lanes
 */

#endif
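The VERIFY_INTERFACE output is what gates every later call: a function may only be invoked when its bit is set in the supported-functions vector. A minimal sketch of that gating, reusing the masks above (handle and function_bits are assumed to come from the verify step in amdgpu_acpi.c; the helper itself is illustrative, not from this commit):

static int example_query_sbios_requests(acpi_handle handle, u32 function_bits)
{
	union acpi_object *info;

	if (!(function_bits & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED))
		return -ENOSYS;		/* firmware does not implement it */

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
	if (!info)
		return -EIO;

	/* the first WORD of the buffer is the structure size; copy at most
	 * that many bytes into the matching __packed struct, then free */
	kfree(info);
	return 0;
}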
drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c (new file):
@@ -0,0 +1,105 @@

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Christian König.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#include <linux/hdmi.h>
#include <linux/gcd.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

static const struct amdgpu_afmt_acr amdgpu_afmt_predefined_acr[] = {
    /*             32kHz        44.1kHz       48kHz    */
    /* Clock      N     CTS      N     CTS      N     CTS */
    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25.20/1.001 MHz */
    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
};


/*
 * calculate CTS and N values if they are not found in the table
 */
static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
{
	int n, cts;
	unsigned long div, mul;

	/* Safe, but overly large values */
	n = 128 * freq;
	cts = clock * 1000;

	/* Smallest valid fraction */
	div = gcd(n, cts);

	n /= div;
	cts /= div;

	/*
	 * The optimal N is 128*freq/1000. Calculate the closest larger
	 * value that doesn't truncate any bits.
	 */
	mul = ((128*freq/1000) + (n-1))/n;

	n *= mul;
	cts *= mul;

	/* Check that we are in spec (not always possible) */
	if (n < (128*freq/1500))
		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
	if (n > (128*freq/300))
		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");

	*N = n;
	*CTS = cts;

	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
		  *N, *CTS, freq);
}

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
{
	struct amdgpu_afmt_acr res;
	u8 i;

	/* Precalculated values for common clocks */
	for (i = 0; i < ARRAY_SIZE(amdgpu_afmt_predefined_acr); i++) {
		if (amdgpu_afmt_predefined_acr[i].clock == clock)
			return amdgpu_afmt_predefined_acr[i];
	}

	/* And odd clocks get manually calculated */
	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);

	return res;
}
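Both the table and the fallback satisfy the HDMI Audio Clock Regeneration relationship CTS = f_pixel * N / (128 * f_audio); for the 25.20 MHz row at 48 kHz, 25200000 * 6144 / (128 * 48000) = 25200, which matches the table. A small self-check along those lines (illustrative only, not part of this commit):

#include <linux/math64.h>
#include <linux/bug.h>

/* Verify one table row against the ACR formula. */
static void example_acr_check(void)
{
	u64 clock_hz = 25200000ULL;	/* 25.20 MHz pixel clock */
	u32 n = 6144;			/* N column for 48 kHz */
	u32 cts = div_u64(clock_hz * n, 128 * 48000);

	WARN_ON(cts != 25200);		/* CTS column for 48 kHz */
}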
(diff of one additional file suppressed by the viewer because it is too large)
@ -0,0 +1,206 @@
|
|||
/*
|
||||
* Copyright 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_ATOMBIOS_H__
|
||||
#define __AMDGPU_ATOMBIOS_H__
|
||||
|
||||
struct atom_clock_dividers {
|
||||
u32 post_div;
|
||||
union {
|
||||
struct {
|
||||
#ifdef __BIG_ENDIAN
|
||||
u32 reserved : 6;
|
||||
u32 whole_fb_div : 12;
|
||||
u32 frac_fb_div : 14;
|
||||
#else
|
||||
u32 frac_fb_div : 14;
|
||||
u32 whole_fb_div : 12;
|
||||
u32 reserved : 6;
|
||||
#endif
|
||||
};
|
||||
u32 fb_div;
|
||||
};
|
||||
u32 ref_div;
|
||||
bool enable_post_div;
|
||||
bool enable_dithen;
|
||||
u32 vco_mode;
|
||||
u32 real_clock;
|
||||
/* added for CI */
|
||||
u32 post_divider;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct atom_mpll_param {
|
||||
union {
|
||||
struct {
|
||||
#ifdef __BIG_ENDIAN
|
||||
u32 reserved : 8;
|
||||
u32 clkfrac : 12;
|
||||
u32 clkf : 12;
|
||||
#else
|
||||
u32 clkf : 12;
|
||||
u32 clkfrac : 12;
|
||||
u32 reserved : 8;
|
||||
#endif
|
||||
};
|
||||
u32 fb_div;
|
||||
};
|
||||
u32 post_div;
|
||||
u32 bwcntl;
|
||||
u32 dll_speed;
|
||||
u32 vco_mode;
|
||||
u32 yclk_sel;
|
||||
u32 qdr;
|
||||
u32 half_rate;
|
||||
};
|
||||
|
||||
#define MEM_TYPE_GDDR5 0x50
|
||||
#define MEM_TYPE_GDDR4 0x40
|
||||
#define MEM_TYPE_GDDR3 0x30
|
||||
#define MEM_TYPE_DDR2 0x20
|
||||
#define MEM_TYPE_GDDR1 0x10
|
||||
#define MEM_TYPE_DDR3 0xb0
|
||||
#define MEM_TYPE_MASK 0xf0
|
||||
|
||||
struct atom_memory_info {
	u8 mem_vendor;
	u8 mem_type;
};

#define MAX_AC_TIMING_ENTRIES 16

struct atom_memory_clock_range_table
{
	u8 num_entries;
	u8 rsv[3];
	u32 mclk[MAX_AC_TIMING_ENTRIES];
};

#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
#define VBIOS_MAX_AC_TIMING_ENTRIES 20

struct atom_mc_reg_entry {
	u32 mclk_max;
	u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
};

struct atom_mc_register_address {
	u16 s1;
	u8 pre_reg_data;
};

struct atom_mc_reg_table {
	u8 last;
	u8 num_entries;
	struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
	struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
};

#define MAX_VOLTAGE_ENTRIES 32

struct atom_voltage_table_entry
{
	u16 value;
	u32 smio_low;
};

struct atom_voltage_table
{
	u32 count;
	u32 mask_low;
	u32 phase_delay;
	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
};

struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
			    u8 id);

struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);

bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);

int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);

bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
				      struct amdgpu_atom_ss *ss,
				      int id, u32 clock);

int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
				       u8 clock_type,
				       u32 clock,
				       bool strobe_mode,
				       struct atom_clock_dividers *dividers);

int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
					    u32 clock,
					    bool strobe_mode,
					    struct atom_mpll_param *mpll_param);

uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
				      uint32_t eng_clock);
void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
				      uint32_t mem_clock);
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
				 u16 voltage_level,
				 u8 voltage_type);

void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
					     u32 eng_clock, u32 mem_clock);

int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
					      u16 *leakage_id);

int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
							     u16 *vddc, u16 *vddci,
							     u16 virtual_voltage_id,
							     u16 vbios_voltage_id);

int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
				    u16 virtual_voltage_id,
				    u16 *voltage);

bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
				u8 voltage_type, u8 voltage_mode);

int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
				      u8 voltage_type, u8 voltage_mode,
				      struct atom_voltage_table *voltage_table);

int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
				      u8 module_index,
				      struct atom_mc_reg_table *reg_table);

void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);

void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);

#endif

@@ -0,0 +1,572 @@
/*
 * Copyright (c) 2010 Red Hat Inc.
 * Author : Dave Airlie <airlied@redhat.com>
 *
 * Licensed under GPLv2
 *
 * ATPX support for both Intel/ATI
 */
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>

#include "amdgpu_acpi.h"

struct amdgpu_atpx_functions {
	bool px_params;
	bool power_cntl;
	bool disp_mux_cntl;
	bool i2c_mux_cntl;
	bool switch_start;
	bool switch_end;
	bool disp_connectors_mapping;
	bool disp_detetion_ports;
};

struct amdgpu_atpx {
	acpi_handle handle;
	struct amdgpu_atpx_functions functions;
};

static struct amdgpu_atpx_priv {
	bool atpx_detected;
	/* handle for device - and atpx */
	acpi_handle dhandle;
	acpi_handle other_handle;
	struct amdgpu_atpx atpx;
} amdgpu_atpx_priv;

struct atpx_verify_interface {
	u16 size;		/* structure size in bytes (includes size field) */
	u16 version;		/* version */
	u32 function_bits;	/* supported functions bit vector */
} __packed;

struct atpx_px_params {
	u16 size;		/* structure size in bytes (includes size field) */
	u32 valid_flags;	/* which flags are valid */
	u32 flags;		/* flags */
} __packed;

struct atpx_power_control {
	u16 size;
	u8 dgpu_state;
} __packed;

struct atpx_mux {
	u16 size;
	u16 mux;
} __packed;

bool amdgpu_has_atpx(void) {
	return amdgpu_atpx_priv.atpx_detected;
}

/**
 * amdgpu_atpx_call - call an ATPX method
 *
 * @handle: acpi handle
 * @function: the ATPX function to execute
 * @params: ATPX function params
 *
 * Executes the requested ATPX function (all asics).
 * Returns a pointer to the acpi output buffer.
 */
static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
					   struct acpi_buffer *params)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2];
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = function;

	if (params) {
		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
		atpx_arg_elements[1].buffer.length = params->length;
		atpx_arg_elements[1].buffer.pointer = params->pointer;
	} else {
		/* We need a second fake parameter */
		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
		atpx_arg_elements[1].integer.value = 0;
	}

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);

	/* Fail only if calling the method fails and ATPX is supported */
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		printk("failed to evaluate ATPX got %s\n",
		       acpi_format_exception(status));
		kfree(buffer.pointer);
		return NULL;
	}

	return buffer.pointer;
}

/**
 * amdgpu_atpx_parse_functions - parse supported functions
 *
 * @f: supported functions struct
 * @mask: supported functions mask from ATPX
 *
 * Use the supported functions mask from ATPX function
 * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
 * are supported (all asics).
 */
static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask)
{
	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
	f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
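/*
 * Worked example (the bit positions are assumed from the flag names
 * above, which are defined elsewhere in amd_acpi.h): a VERIFY_INTERFACE
 * mask of 0x03 would set only px_params and power_cntl, leaving the mux
 * and notification callbacks disabled.
 */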

/**
 * amdgpu_atpx_validate_functions - validate ATPX functions
 *
 * @atpx: amdgpu atpx struct
 *
 * Validate that required functions are enabled (all asics).
 * returns 0 on success, error on failure.
 */
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
	/* make sure required functions are enabled */
	/* dGPU power control is required */
	atpx->functions.power_cntl = true;

	if (atpx->functions.px_params) {
		union acpi_object *info;
		struct atpx_px_params output;
		size_t size;
		u32 valid_bits;

		info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
		if (!info)
			return -EIO;

		memset(&output, 0, sizeof(output));

		size = *(u16 *) info->buffer.pointer;
		if (size < 10) {
			printk("ATPX buffer is too small: %zu\n", size);
			kfree(info);
			return -EINVAL;
		}
		size = min(sizeof(output), size);

		memcpy(&output, info->buffer.pointer, size);

		valid_bits = output.flags & output.valid_flags;
		/* if separate mux flag is set, mux controls are required */
		if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
			atpx->functions.i2c_mux_cntl = true;
			atpx->functions.disp_mux_cntl = true;
		}
		/* if any outputs are muxed, mux controls are required */
		if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
				  ATPX_TV_SIGNAL_MUXED |
				  ATPX_DFP_SIGNAL_MUXED))
			atpx->functions.disp_mux_cntl = true;

		kfree(info);
	}
	return 0;
}

/**
 * amdgpu_atpx_verify_interface - verify ATPX
 *
 * @atpx: amdgpu atpx struct
 *
 * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
 * to initialize ATPX and determine what features are supported
 * (all asics).
 * returns 0 on success, error on failure.
 */
static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
{
	union acpi_object *info;
	struct atpx_verify_interface output;
	size_t size;
	int err = 0;

	info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
	if (!info)
		return -EIO;

	memset(&output, 0, sizeof(output));

	size = *(u16 *) info->buffer.pointer;
	if (size < 8) {
		printk("ATPX buffer is too small: %zu\n", size);
		err = -EINVAL;
		goto out;
	}
	size = min(sizeof(output), size);

	memcpy(&output, info->buffer.pointer, size);

	/* TODO: check version? */
	printk("ATPX version %u, functions 0x%08x\n",
	       output.version, output.function_bits);

	amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);

out:
	kfree(info);
	return err;
}

/**
 * amdgpu_atpx_set_discrete_state - power up/down discrete GPU
 *
 * @atpx: atpx info struct
 * @state: discrete GPU state (0 = power down, 1 = power up)
 *
 * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
 * power down/up the discrete GPU (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
{
	struct acpi_buffer params;
	union acpi_object *info;
	struct atpx_power_control input;

	if (atpx->functions.power_cntl) {
		input.size = 3;
		input.dgpu_state = state;
		params.length = input.size;
		params.pointer = &input;
		info = amdgpu_atpx_call(atpx->handle,
					ATPX_FUNCTION_POWER_CONTROL,
					&params);
		if (!info)
			return -EIO;
		kfree(info);
	}
	return 0;
}
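/*
 * Note on input.size: the argument buffer handed to ATPX is the packed
 * struct itself, so the literal 3 here is the u16 size field plus the
 * u8 dgpu_state, and the mux helpers below pass 4 bytes for their
 * u16 size + u16 mux.
 */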

/**
 * amdgpu_atpx_switch_disp_mux - switch display mux
 *
 * @atpx: atpx info struct
 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
 *
 * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
 * switch the display mux between the discrete GPU and integrated GPU
 * (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id)
{
	struct acpi_buffer params;
	union acpi_object *info;
	struct atpx_mux input;

	if (atpx->functions.disp_mux_cntl) {
		input.size = 4;
		input.mux = mux_id;
		params.length = input.size;
		params.pointer = &input;
		info = amdgpu_atpx_call(atpx->handle,
					ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
					&params);
		if (!info)
			return -EIO;
		kfree(info);
	}
	return 0;
}

/**
 * amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux
 *
 * @atpx: atpx info struct
 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
 *
 * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
 * switch the i2c/hpd mux between the discrete GPU and integrated GPU
 * (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id)
{
	struct acpi_buffer params;
	union acpi_object *info;
	struct atpx_mux input;

	if (atpx->functions.i2c_mux_cntl) {
		input.size = 4;
		input.mux = mux_id;
		params.length = input.size;
		params.pointer = &input;
		info = amdgpu_atpx_call(atpx->handle,
					ATPX_FUNCTION_I2C_MUX_CONTROL,
					&params);
		if (!info)
			return -EIO;
		kfree(info);
	}
	return 0;
}

/**
 * amdgpu_atpx_switch_start - notify the sbios of a GPU switch
 *
 * @atpx: atpx info struct
 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
 *
 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
 * function to notify the sbios that a switch between the discrete GPU and
 * integrated GPU has begun (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id)
{
	struct acpi_buffer params;
	union acpi_object *info;
	struct atpx_mux input;

	if (atpx->functions.switch_start) {
		input.size = 4;
		input.mux = mux_id;
		params.length = input.size;
		params.pointer = &input;
		info = amdgpu_atpx_call(atpx->handle,
					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
					&params);
		if (!info)
			return -EIO;
		kfree(info);
	}
	return 0;
}

/**
 * amdgpu_atpx_switch_end - notify the sbios of a GPU switch
 *
 * @atpx: atpx info struct
 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
 *
 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
 * function to notify the sbios that a switch between the discrete GPU and
 * integrated GPU has ended (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id)
{
	struct acpi_buffer params;
	union acpi_object *info;
	struct atpx_mux input;

	if (atpx->functions.switch_end) {
		input.size = 4;
		input.mux = mux_id;
		params.length = input.size;
		params.pointer = &input;
		info = amdgpu_atpx_call(atpx->handle,
					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
					&params);
		if (!info)
			return -EIO;
		kfree(info);
	}
	return 0;
}

/**
 * amdgpu_atpx_switchto - switch to the requested GPU
 *
 * @id: GPU to switch to
 *
 * Execute the necessary ATPX functions to switch between the discrete GPU and
 * integrated GPU (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id)
{
	u16 gpu_id;

	if (id == VGA_SWITCHEROO_IGD)
		gpu_id = ATPX_INTEGRATED_GPU;
	else
		gpu_id = ATPX_DISCRETE_GPU;

	amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id);
	amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id);
	amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id);
	amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id);

	return 0;
}

/**
 * amdgpu_atpx_power_state - power down/up the requested GPU
 *
 * @id: GPU to power down/up
 * @state: requested power state (0 = off, 1 = on)
 *
 * Execute the necessary ATPX function to power down/up the discrete GPU
 * (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
				   enum vga_switcheroo_state state)
{
	/* on w500 ACPI can't change intel gpu state */
	if (id == VGA_SWITCHEROO_IGD)
		return 0;

	amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state);
	return 0;
}

/**
 * amdgpu_atpx_pci_probe_handle - look up the ATPX handle
 *
 * @pdev: pci device
 *
 * Look up the ATPX handles (all asics).
 * Returns true if the handles are found, false if not.
 */
static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
{
	acpi_handle dhandle, atpx_handle;
	acpi_status status;

	dhandle = ACPI_HANDLE(&pdev->dev);
	if (!dhandle)
		return false;

	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
	if (ACPI_FAILURE(status)) {
		amdgpu_atpx_priv.other_handle = dhandle;
		return false;
	}
	amdgpu_atpx_priv.dhandle = dhandle;
	amdgpu_atpx_priv.atpx.handle = atpx_handle;
	return true;
}

/**
 * amdgpu_atpx_init - verify the ATPX interface
 *
 * Verify the ATPX interface (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atpx_init(void)
{
	int r;

	/* set up the ATPX handle */
	r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx);
	if (r)
		return r;

	/* validate the atpx setup */
	r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx);
	if (r)
		return r;

	return 0;
}

/**
 * amdgpu_atpx_get_client_id - get the client id
 *
 * @pdev: pci device
 *
 * look up whether we are the integrated or discrete GPU (all asics).
 * Returns the client id.
 */
static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
{
	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
		return VGA_SWITCHEROO_IGD;
	else
		return VGA_SWITCHEROO_DIS;
}

static struct vga_switcheroo_handler amdgpu_atpx_handler = {
	.switchto = amdgpu_atpx_switchto,
	.power_state = amdgpu_atpx_power_state,
	.init = amdgpu_atpx_init,
	.get_client_id = amdgpu_atpx_get_client_id,
};

/**
 * amdgpu_atpx_detect - detect whether we have PX
 *
 * Check if we have a PX system (all asics).
 * Returns true if we have a PX system, false if not.
 */
static bool amdgpu_atpx_detect(void)
{
	char acpi_method_name[255] = { 0 };
	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
	struct pci_dev *pdev = NULL;
	bool has_atpx = false;
	int vga_count = 0;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		vga_count++;

		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
	}

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
		vga_count++;

		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
	}

	if (has_atpx && vga_count == 2) {
		acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
		       acpi_method_name);
		amdgpu_atpx_priv.atpx_detected = true;
		return true;
	}
	return false;
}

/**
 * amdgpu_register_atpx_handler - register with vga_switcheroo
 *
 * Register the PX callbacks with vga_switcheroo (all asics).
 */
void amdgpu_register_atpx_handler(void)
{
	bool r;

	/* detect if we have any ATPX + 2 VGA in the system */
	r = amdgpu_atpx_detect();
	if (!r)
		return;

	vga_switcheroo_register_handler(&amdgpu_atpx_handler);
}

/**
 * amdgpu_unregister_atpx_handler - unregister with vga_switcheroo
 *
 * Unregister the PX callbacks with vga_switcheroo (all asics).
 */
void amdgpu_unregister_atpx_handler(void)
{
	vga_switcheroo_unregister_handler();
}

@@ -0,0 +1,221 @@
/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct amdgpu_fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
		if (r)
			goto exit_do_move;
		r = amdgpu_fence_wait(fence, false);
		if (r)
			goto exit_do_move;
		amdgpu_fence_unref(&fence);
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	if (fence)
		amdgpu_fence_unref(&fence);
	return r;
}


static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
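/*
 * Worked example of the arithmetic above: n = 1024 copies of a 1 MiB
 * buffer (size >> 10 = 1024 kB) finishing in 512 ms gives
 * throughput = (1024 * 1024) / 512 = 2048, logged as 2048 MB/s
 * (16384 Mb/s).
 */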

static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
	amdgpu_bo_unreserve(sobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
	amdgpu_bo_unreserve(dobj);
	if (r) {
		goto out_cleanup;
	}

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	if (sobj) {
		r = amdgpu_bo_reserve(sobj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}

	if (r) {
		DRM_ERROR("Error while benchmarking BO move.\n");
	}
}

void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
	int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, VRAM to GTT and GTT to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;

	default:
		DRM_ERROR("Unknown benchmark\n");
	}
}

@@ -0,0 +1,359 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
 * BIOS.
 */

/* If you boot an IGP board with a discrete card as the primary,
 * the IGP rom is not accessible via the rom bar as the IGP rom is
 * part of the system bios.  On boot, the system bios puts a
 * copy of the igp rom at the start of vram if a discrete card is
 * present.
 */
static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios;
	resource_size_t vram_base;
	resource_size_t size = 256 * 1024; /* ??? */

	if (!(adev->flags & AMDGPU_IS_APU))
		if (!amdgpu_card_posted(adev))
			return false;

	adev->bios = NULL;
	vram_base = pci_resource_start(adev->pdev, 0);
	bios = ioremap(vram_base, size);
	if (!bios) {
		return false;
	}

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		iounmap(bios);
		return false;
	}
	adev->bios = kmalloc(size, GFP_KERNEL);
	if (adev->bios == NULL) {
		iounmap(bios);
		return false;
	}
	memcpy_fromio(adev->bios, bios, size);
	iounmap(bios);
	return true;
}

bool amdgpu_read_bios(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios;
	size_t size;

	adev->bios = NULL;
	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
	bios = pci_map_rom(adev->pdev, &size);
	if (!bios) {
		return false;
	}

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		pci_unmap_rom(adev->pdev, bios);
		return false;
	}
	adev->bios = kmemdup(bios, size, GFP_KERNEL);
	if (adev->bios == NULL) {
		pci_unmap_rom(adev->pdev, bios);
		return false;
	}
	pci_unmap_rom(adev->pdev, bios);
	return true;
}

static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios;
	size_t size;

	adev->bios = NULL;

	bios = pci_platform_rom(adev->pdev, &size);
	if (!bios) {
		return false;
	}

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		return false;
	}
	adev->bios = kmemdup(bios, size, GFP_KERNEL);
	if (adev->bios == NULL) {
		return false;
	}

	return true;
}

#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
 * dual-gpu systems.
 */
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
 * amdgpu_atrm_call - fetch a chunk of the vbios
 *
 * @atrm_handle: acpi ATRM handle
 * @bios: vbios image pointer
 * @offset: offset of vbios image data to fetch
 * @len: length of vbios image data to fetch
 *
 * Executes ATRM to fetch a chunk of the discrete
 * vbios image on PX systems (all asics).
 * Returns the length of the buffer fetched.
 */
static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
			    int offset, int len)
{
	acpi_status status;
	union acpi_object atrm_arg_elements[2], *obj;
	struct acpi_object_list atrm_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};

	atrm_arg.count = 2;
	atrm_arg.pointer = &atrm_arg_elements[0];

	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[0].integer.value = offset;

	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[1].integer.value = len;

	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
		return -ENODEV;
	}

	obj = (union acpi_object *)buffer.pointer;
	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
	len = obj->buffer.length;
	kfree(buffer.pointer);
	return len;
}

static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
	int ret;
	int size = 256 * 1024;
	int i;
	struct pci_dev *pdev = NULL;
	acpi_handle dhandle, atrm_handle;
	acpi_status status;
	bool found = false;

	/* ATRM is for the discrete card only */
	if (adev->flags & AMDGPU_IS_APU)
		return false;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		dhandle = ACPI_HANDLE(&pdev->dev);
		if (!dhandle)
			continue;

		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
		if (!ACPI_FAILURE(status)) {
			found = true;
			break;
		}
	}

	if (!found) {
		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
			dhandle = ACPI_HANDLE(&pdev->dev);
			if (!dhandle)
				continue;

			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
			if (!ACPI_FAILURE(status)) {
				found = true;
				break;
			}
		}
	}

	if (!found)
		return false;

	adev->bios = kmalloc(size, GFP_KERNEL);
	if (!adev->bios) {
		DRM_ERROR("Unable to allocate bios\n");
		return false;
	}

	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
		ret = amdgpu_atrm_call(atrm_handle,
				       adev->bios,
				       (i * ATRM_BIOS_PAGE),
				       ATRM_BIOS_PAGE);
		if (ret < ATRM_BIOS_PAGE)
			break;
	}

	if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
		kfree(adev->bios);
		return false;
	}
	return true;
}
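/*
 * Fetch arithmetic for the loop above: with the 256 KiB upper bound and
 * 4 KiB ATRM pages, at most 256 * 1024 / 4096 = 64 calls are issued,
 * and the loop stops early on a short read once the actual image ends.
 */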
#else
static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
	return false;
}
#endif

static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
	if (adev->flags & AMDGPU_IS_APU)
		return igp_read_bios_from_vram(adev);
	else
		return amdgpu_asic_read_disabled_bios(adev);
}

#ifdef CONFIG_ACPI
static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
	bool ret = false;
	struct acpi_table_header *hdr;
	acpi_size tbl_size;
	UEFI_ACPI_VFCT *vfct;
	GOP_VBIOS_CONTENT *vbios;
	VFCT_IMAGE_HEADER *vhdr;

	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
		return false;
	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
		goto out_unmap;
	}

	vfct = (UEFI_ACPI_VFCT *)hdr;
	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
		goto out_unmap;
	}

	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
	vhdr = &vbios->VbiosHeader;
	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
		 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
		 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);

	if (vhdr->PCIBus != adev->pdev->bus->number ||
	    vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
	    vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
	    vhdr->VendorID != adev->pdev->vendor ||
	    vhdr->DeviceID != adev->pdev->device) {
		DRM_INFO("ACPI VFCT table is not for this card\n");
		goto out_unmap;
	}

	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
		DRM_ERROR("ACPI VFCT image truncated\n");
		goto out_unmap;
	}

	adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
	ret = !!adev->bios;

out_unmap:
	return ret;
}
#else
static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
	return false;
}
#endif

bool amdgpu_get_bios(struct amdgpu_device *adev)
{
	bool r;
	uint16_t tmp;

	r = amdgpu_atrm_get_bios(adev);
	if (r == false)
		r = amdgpu_acpi_vfct_bios(adev);
	if (r == false)
		r = igp_read_bios_from_vram(adev);
	if (r == false)
		r = amdgpu_read_bios(adev);
	if (r == false) {
		r = amdgpu_read_disabled_bios(adev);
	}
	if (r == false) {
		r = amdgpu_read_platform_bios(adev);
	}
	if (r == false || adev->bios == NULL) {
		DRM_ERROR("Unable to locate a BIOS ROM\n");
		adev->bios = NULL;
		return false;
	}
	if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
		printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
		goto free_bios;
	}

	tmp = RBIOS16(0x18);
	if (RBIOS8(tmp + 0x14) != 0x0) {
		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
		goto free_bios;
	}

	adev->bios_header_start = RBIOS16(0x48);
	if (!adev->bios_header_start) {
		goto free_bios;
	}
	tmp = adev->bios_header_start + 4;
	if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
	    !memcmp(adev->bios + tmp, "MOTA", 4)) {
		adev->is_atom_bios = true;
	} else {
		adev->is_atom_bios = false;
	}

	DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
	return true;
free_bios:
	kfree(adev->bios);
	adev->bios = NULL;
	return false;
}

@@ -0,0 +1,268 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
				 struct amdgpu_bo_list **result,
				 int *id)
{
	int r;

	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!*result)
		return -ENOMEM;

	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, *result,
		      0, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&fpriv->bo_list_lock);
		kfree(*result);
		return r;
	}
	*id = r;

	mutex_init(&(*result)->lock);
	(*result)->num_entries = 0;
	(*result)->array = NULL;

	mutex_lock(&(*result)->lock);
	mutex_unlock(&fpriv->bo_list_lock);

	return 0;
}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = info[i].bo_priority;
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	drm_free_large(array);
	return -ENOENT;
}

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}

void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	drm_free_large(list->array);
	kfree(list);
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}
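	/*
	 * The two copy paths above keep the ioctl compatible across struct
	 * revisions: userspace passing a smaller bo_info_size gets each
	 * entry zero-extended, while a larger one has its extra tail bytes
	 * ignored.
	 */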

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}

File diff suppressed because it is too large

@@ -0,0 +1,42 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_CONNECTORS_H__
#define __AMDGPU_CONNECTORS_H__

struct edid *amdgpu_connector_edid(struct drm_connector *connector);
void amdgpu_connector_hotplug(struct drm_connector *connector);
int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector);
u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector);
void
amdgpu_connector_add(struct amdgpu_device *adev,
		     uint32_t connector_id,
		     uint32_t supported_device,
		     int connector_type,
		     struct amdgpu_i2c_bus_rec *i2c_bus,
		     uint16_t connector_object_id,
		     struct amdgpu_hpd *hpd,
		     struct amdgpu_router *router);

#endif

@@ -0,0 +1,825 @@
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_CS_MAX_PRIORITY		32u
#define AMDGPU_CS_NUM_BUCKETS		(AMDGPU_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct amdgpu_cs_buckets {
	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};

static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
}

static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
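/*
 * Worked example of the bucket sort: adding A (prio 2), B (prio 0),
 * C (prio 2) and D (prio 31) fills bucket[0] = {B}, bucket[2] = {A, C}
 * and bucket[31] = {D}. Since list_splice() prepends each bucket to
 * out_list, later (higher) buckets end up in front, giving D, A, C, B:
 * descending priority, with A still ahead of C (stable).
 */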
|
||||
|
||||
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
|
||||
u32 ip_instance, u32 ring,
|
||||
struct amdgpu_ring **out_ring)
|
||||
{
|
||||
/* Right now all IPs have only one instance - multiple rings. */
|
||||
if (ip_instance != 0) {
|
||||
DRM_ERROR("invalid ip instance: %d\n", ip_instance);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (ip_type) {
|
||||
default:
|
||||
DRM_ERROR("unknown ip type: %d\n", ip_type);
|
||||
return -EINVAL;
|
||||
case AMDGPU_HW_IP_GFX:
|
||||
if (ring < adev->gfx.num_gfx_rings) {
|
||||
*out_ring = &adev->gfx.gfx_ring[ring];
|
||||
} else {
|
||||
DRM_ERROR("only %d gfx rings are supported now\n",
|
||||
adev->gfx.num_gfx_rings);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_HW_IP_COMPUTE:
|
||||
if (ring < adev->gfx.num_compute_rings) {
|
||||
*out_ring = &adev->gfx.compute_ring[ring];
|
||||
} else {
|
||||
DRM_ERROR("only %d compute rings are supported now\n",
|
||||
adev->gfx.num_compute_rings);
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_HW_IP_DMA:
|
||||
if (ring < 2) {
|
||||
*out_ring = &adev->sdma[ring].ring;
|
||||
} else {
|
||||
DRM_ERROR("only two SDMA rings are supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_HW_IP_UVD:
|
||||
*out_ring = &adev->uvd.ring;
|
||||
break;
|
||||
case AMDGPU_HW_IP_VCE:
|
||||
if (ring < 2){
|
||||
*out_ring = &adev->vce.ring[ring];
|
||||
} else {
|
||||
DRM_ERROR("only two VCE rings are supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array = NULL;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned size, i;
	int r = 0;

	if (!cs->in.num_chunks)
		goto out;

	p->ctx_id = cs->in.ctx_id;
	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);

	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (chunk_array == NULL) {
		r = -ENOMEM;
		goto out;
	}

	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		r = -EFAULT;
		goto out;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (p->chunks == NULL) {
		r = -ENOMEM;
		goto out;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			r = -EFAULT;
			goto out;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
			p->num_ibs++;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			r = -ENOMEM;
			goto out;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			r = -EFAULT;
			goto out;
		}

		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
				uint32_t handle;
				struct drm_gem_object *gobj;
				struct drm_amdgpu_cs_chunk_fence *fence_data;

				fence_data = (void *)p->chunks[i].kdata;
				handle = fence_data->handle;
				gobj = drm_gem_object_lookup(p->adev->ddev,
							     p->filp, handle);
				if (gobj == NULL) {
					r = -EINVAL;
					goto out;
				}

				p->uf.bo = gem_to_amdgpu_bo(gobj);
				p->uf.offset = fence_data->offset;
			} else {
				r = -EINVAL;
				goto out;
			}
		}
	}

	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!p->ibs) {
		r = -ENOMEM;
		goto out;
	}

	p->ib_bos = kcalloc(p->num_ibs, sizeof(struct amdgpu_bo_list_entry),
			    GFP_KERNEL);
	if (!p->ib_bos)
		r = -ENOMEM;

out:
	kfree(chunk_array);
	return r;
}
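/* A sketch of the userspace layout consumed above (illustrative only, field
 * names taken from the copies performed in amdgpu_cs_parser_init()):
 * cs->in.chunks points at an array of cs->in.num_chunks u64 values, each of
 * which is itself a user pointer to a struct drm_amdgpu_cs_chunk holding
 * chunk_id, length_dw and a chunk_data pointer to length_dw dwords. An IB
 * chunk (AMDGPU_CHUNK_ID_IB) contributes one entry to p->ibs; a fence chunk
 * (AMDGPU_CHUNK_ID_FENCE) must carry at least a drm_amdgpu_cs_chunk_fence
 * with a GEM handle and offset for the user fence.
 */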

/* Returns how many bytes TTM can move per IB.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	u64 real_vram_size = adev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&adev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *             __________________
	 * 1/4 of     -|\               |
	 * VRAM        | \              |
	 *             |  \             |
	 *             |   \            |
	 *             |    \           |
	 *             |     \          |
	 *             |      \         |
	 *             |       \________|1 MB
	 *             |----------------|
	 *        VRAM 0 %             100 %
	 *             used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
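/* Worked example with illustrative numbers: on a 4 GiB board with 1 GiB in
 * use, half_vram = 2 GiB and half_free_vram = 2 GiB - 1 GiB = 1 GiB, so the
 * per-IB threshold is 1 GiB >> 1 = 512 MiB. Once usage reaches or exceeds
 * 2 GiB, half_free_vram is 0 and the max() clamps the threshold to 1 MiB.
 */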

int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_list_entry *lobj;
	struct list_head duplicates;
	struct amdgpu_bo *bo;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
	int r;

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, &p->validated, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			amdgpu_ttm_placement_from_domain(bo, domain);
			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(&p->ticket, &p->validated);
				return r;
			}
		}
		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
	}
	return 0;
}

static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_cs_buckets buckets;
	bool need_mmap_lock;
	int i, r;

	if (p->bo_list == NULL)
		return 0;

	need_mmap_lock = p->bo_list->has_userptr;
	amdgpu_cs_buckets_init(&buckets);
	for (i = 0; i < p->bo_list->num_entries; i++)
		amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
				      p->bo_list->array[i].priority);

	amdgpu_cs_buckets_get_list(&buckets, &p->validated);
	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
				      &p->validated);

	for (i = 0; i < p->num_ibs; i++) {
		if (!p->ib_bos[i].robj)
			continue;

		list_add(&p->ib_bos[i].tv.head, &p->validated);
	}

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = amdgpu_cs_list_validate(p);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
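/* Example (illustrative): for buffers of 4 and 16 pages the comparator
 * returns 4 - 16 = -12, so the 4-page entry sorts first; the list_sort()
 * call in the fini path below therefore leaves the smallest buffers at the
 * head of the LRU-bound list.
 */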

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicates whether to back off the buffer reservations
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ibs[parser->num_ibs-1].fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	for (i = 0; i < parser->num_ibs; i++) {
		struct amdgpu_bo *bo = parser->ib_bos[i].robj;
		amdgpu_ib_free(parser->adev, &parser->ibs[i]);

		if (bo)
			drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
	kfree(parser->ibs);
	kfree(parser->ib_bos);
	if (parser->uf.bo)
		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
		}
	}

	for (i = 0; i < p->num_ibs; i++) {
		bo = p->ib_bos[i].robj;
		if (!bo)
			continue;

		bo_va = p->ib_bos[i].bo_va;
		if (!bo_va)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
	}
	return amdgpu_vm_clear_invalids(adev, vm);
}

static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	int i, r;

	if (parser->num_ibs == 0)
		return 0;

	/* Only for UVD/VCE VM emulation */
	for (i = 0; i < parser->num_ibs; i++) {
		ring = parser->ibs[i].ring;
		if (ring->funcs->parse_cs) {
			r = amdgpu_ring_parse_cs(ring, parser, i);
			if (r)
				return r;
		}
	}

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	amdgpu_cs_sync_rings(parser);

	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
			       parser->filp);

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
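/* Illustrative note: -EDEADLK from the fence code signals a GPU lockup, so
 * the device is reset here and a successful reset is reported to userspace
 * as -EAGAIN. A caller is then expected to resubmit; a hedged sketch using
 * the generic libdrm ioctl wrapper (the retry policy is an assumption, not
 * mandated by this code):
 *
 *	do {
 *		r = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
 *	} while (r == -EAGAIN);
 */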

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_bo_list_entry *ib_bo;
		struct amdgpu_ring *ring;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *aobj;
		void *kptr;

		chunk = &parser->chunks[i];
		ib = &parser->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		gobj = drm_gem_object_lookup(adev->ddev, parser->filp, chunk_ib->handle);
		if (gobj == NULL)
			return -ENOENT;
		aobj = gem_to_amdgpu_bo(gobj);

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r) {
			drm_gem_object_unreference_unlocked(gobj);
			return r;
		}

		if (ring->funcs->parse_cs) {
			r = amdgpu_bo_reserve(aobj, false);
			if (r) {
				drm_gem_object_unreference_unlocked(gobj);
				return r;
			}

			r = amdgpu_bo_kmap(aobj, &kptr);
			if (r) {
				amdgpu_bo_unreserve(aobj);
				drm_gem_object_unreference_unlocked(gobj);
				return r;
			}

			r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				amdgpu_bo_unreserve(aobj);
				drm_gem_object_unreference_unlocked(gobj);
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
			amdgpu_bo_unreserve(aobj);
		} else {
			r = amdgpu_ib_get(ring, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				drm_gem_object_unreference_unlocked(gobj);
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}
		ib->length_dw = chunk_ib->ib_bytes / 4;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			ib->is_const_ib = true;
		if (chunk_ib->flags & AMDGPU_IB_FLAG_GDS)
			ib->gds_needed = true;
		if (ib->ring->current_filp != parser->filp) {
			ib->ring->need_ctx_switch = true;
			ib->ring->current_filp = parser->filp;
		}

		ib_bo = &parser->ib_bos[j];
		ib_bo->robj = aobj;
		ib_bo->prefered_domains = aobj->initial_domain;
		ib_bo->allowed_domains = aobj->initial_domain;
		ib_bo->priority = 0;
		ib_bo->tv.bo = &aobj->tbo;
		ib_bo->tv.shared = true;
		j++;
	}

	if (!parser->num_ibs)
		return 0;

	/* add GDS resources to first IB */
	if (parser->bo_list) {
		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
		struct amdgpu_ib *ib = &parser->ibs[0];

		if (gds) {
			ib->gds_base = amdgpu_bo_gpu_offset(gds);
			ib->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			ib->gws_base = amdgpu_bo_gpu_offset(gws);
			ib->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			ib->oa_base = amdgpu_bo_gpu_offset(oa);
			ib->oa_size = amdgpu_bo_size(oa);
		}
	}

	/* wrap the last IB with user fence */
	if (parser->uf.bo) {
		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];

		/* UVD & VCE fw doesn't support user fences */
		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
			return -EINVAL;

		ib->user = &parser->uf;
	}

	return 0;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser;
	int r, i;

	down_read(&adev->exclusive_lock);
	if (!adev->accel_working) {
		up_read(&adev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
	parser.filp = filp;
	parser.adev = adev;
	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		amdgpu_cs_parser_fini(&parser, r, false);
		up_read(&adev->exclusive_lock);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (!r) {
		r = amdgpu_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		amdgpu_cs_parser_fini(&parser, r, false);
		up_read(&adev->exclusive_lock);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

	for (i = 0; i < parser.num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r) {
		goto out;
	}

	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
out:
	amdgpu_cs_parser_fini(&parser, r, true);
	up_read(&adev->exclusive_lock);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	uint64_t seq[AMDGPU_MAX_RINGS] = {0};
	struct amdgpu_ring *ring = NULL;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	seq[ring->idx] = wait->in.handle;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
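/* Illustrative mapping of the wait result (derived from the code above):
 * a negative r is returned as an error, r == 0 means the timeout expired
 * before the fence signaled (out.status = 1), and r > 0 means the
 * submission completed (out.status = 0). The handle passed in is the
 * sequence number returned in cs->out.handle by amdgpu_cs_ioctl().
 */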

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_list_entry *reloc;
	struct amdgpu_bo_va_mapping *mapping;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(reloc, &parser->validated, tv.head) {
		if (!reloc->bo_va)
			continue;

		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}
@ -0,0 +1,161 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	mgr = &ctx->fpriv->ctx_mgr;

	mutex_lock(&mgr->hlock);
	idr_remove(&mgr->ctx_handles, ctx->id);
	mutex_unlock(&mgr->hlock);
	kfree(ctx);
}

int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
{
	int r;
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->hlock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->hlock);
		kfree(ctx);
		return r;
	}
	mutex_unlock(&mgr->hlock);
	*id = (uint32_t)r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->id = *id;
	ctx->fpriv = fpriv;
	kref_init(&ctx->refcount);

	return 0;
}

int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
{
	int r;
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	rcu_read_lock();
	ctx = idr_find(&mgr->ctx_handles, id);
	rcu_read_unlock();
	if (ctx) {
		/* if no task is pending on this context, free it */
		r = kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		if (r == 1)
			return 0; /* context was removed successfully */
		else {
			/* context is still in use */
			kref_get(&ctx->refcount);
			return -ERESTARTSYS;
		}
	}
	return -EINVAL;
}

int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, struct amdgpu_ctx_state *state)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	rcu_read_lock();
	ctx = idr_find(&mgr->ctx_handles, id);
	rcu_read_unlock();
	if (ctx) {
		/* state should alter with CS activity */
		*state = ctx->state;
		return 0;
	}
	return -EINVAL;
}

void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx (id=%u) is still alive\n", ctx->id);
	}

	mutex_destroy(&mgr->hlock);
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	uint32_t flags;
	struct amdgpu_ctx_state state;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	flags = args->in.flags;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(adev, fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &state);
		if (r == 0) {
			args->out.state.flags = state.flags;
			args->out.state.hangs = state.hangs;
		}
		break;
	default:
		return -EINVAL;
	}

	return r;
}
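/* A hedged userspace sketch of this ioctl (field names follow the union
 * accesses above; the DRM_IOCTL_AMDGPU_CTX request number is assumed from
 * the uapi header rather than shown in this patch):
 *
 *	union drm_amdgpu_ctx args = {0};
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args) == 0)
 *		ctx_id = args.out.alloc.ctx_id;
 *	...
 *	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
 *	args.in.ctx_id = ctx_id;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
 */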
File diff suppressed because it is too large
@ -0,0 +1,832 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>


static void amdgpu_flip_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpuCrtc->base;
	struct amdgpu_fence *fence;
	unsigned long flags;
	int r;

	down_read(&adev->exclusive_lock);
	if (work->fence) {
		fence = to_amdgpu_fence(work->fence);
		if (fence) {
			r = amdgpu_fence_wait(fence, false);
			if (r == -EDEADLK) {
				up_read(&adev->exclusive_lock);
				r = amdgpu_gpu_reset(adev);
				down_read(&adev->exclusive_lock);
			}
		} else
			r = fence_wait(work->fence, false);

		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		fence_put(work->fence);
		work->fence = NULL;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
	/* do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
	/* set the flip status */
	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&adev->exclusive_lock);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	kfree(work);
}

int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_pending_vblank_event *event,
			  uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *old_amdgpu_fb;
	struct amdgpu_framebuffer *new_amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_rbo;
	unsigned long flags;
	u64 tiling_flags;
	u64 base;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;

	/* schedule unpin of the old buffer */
	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
	obj = old_amdgpu_fb->obj;

	/* take a reference to the old object */
	drm_gem_object_reference(obj);
	work->old_rbo = gem_to_amdgpu_bo(obj);

	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
	obj = new_amdgpu_fb->obj;
	new_rbo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, &base);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}

	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
	amdgpu_bo_unreserve(new_rbo);

	work->base = base;

	r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
	if (r) {
		DRM_ERROR("failed to get vblank before flip\n");
		goto pflip_cleanup;
	}

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto vblank_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
	return 0;

vblank_cleanup:
	drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	amdgpu_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	fence_put(work->fence);
	kfree(work);

	return r;
}
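/* Flow summary (descriptive, derived from the function above): the new BO is
 * pinned in VRAM and its exclusive fence captured, a vblank reference is
 * taken, and the flip work is queued. amdgpu_flip_work_func() then waits on
 * the fence and programs the flip via mmio, while amdgpu_unpin_work_func()
 * later unpins and drops the reference on the old BO.
 */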

int amdgpu_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

void amdgpu_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

/**
 * amdgpu_ddc_probe
 *
 * Sanity-check the DDC bus of a connector by reading back the start of the
 * EDID block, either over native i2c or over the DP aux channel.
 */
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
		      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector */
		return false;
	}
	return true;
}

static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	if (amdgpu_fb->obj) {
		drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
	}
	drm_framebuffer_cleanup(fb);
	kfree(amdgpu_fb);
}

static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = amdgpu_user_framebuffer_destroy,
	.create_handle = amdgpu_user_framebuffer_create_handle,
};

int
amdgpu_framebuffer_init(struct drm_device *dev,
			struct amdgpu_framebuffer *rfb,
			struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;
	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

static void amdgpu_output_poll_changed(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	amdgpu_fb_output_poll_changed(adev);
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed
};

static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	if (adev->is_atom_bios) {
		adev->mode_info.coherent_mode_property =
			drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
		if (!adev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_update_display_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;

}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
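/* Worked example (illustrative numbers): scaling a 1280x720 mode up to a
 * 1920x1080 native panel gives hsc = 1280/1920 and vsc = 720/1080, i.e.
 * about 0.667, which dfixed_div() stores as roughly 2731/4096 in the 20.12
 * fixed-point format held in amdgpu_crtc->hsc/vsc (dfixed_const(x) is
 * x << 12).
 */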

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vbl_end = 0;
	}

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;

			/* Signal this correction as "applied". */
			ret |= 0x8;
		}
	}

	return ret;
}
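/* Decoding example (illustrative values for a 1080p timing with vtotal =
 * 1125): position = (100 << 16) | 525 decodes to hpos = 100, vpos = 525;
 * vbl = (0 << 16) | 1080 decodes to vbl_start = 1080, vbl_end = 0, so
 * vpos = 525 lies in the active area and is returned as-is, while a raw
 * vpos of 1100 is inside vblank and is corrected to 1100 - 1125 = -25,
 * i.e. 25 scanlines until scanout starts again.
 */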

int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}
@ -0,0 +1,955 @@
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}
|
||||
|
||||
void amdgpu_dpm_print_cap_info(u32 caps)
|
||||
{
|
||||
printk("\tcaps: ");
|
||||
if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
|
||||
printk("single_disp ");
|
||||
if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
|
||||
printk("video ");
|
||||
if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
|
||||
printk("no_dc ");
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
|
||||
struct amdgpu_ps *rps)
|
||||
{
|
||||
printk("\tstatus: ");
|
||||
if (rps == adev->pm.dpm.current_ps)
|
||||
printk("c ");
|
||||
if (rps == adev->pm.dpm.requested_ps)
|
||||
printk("r ");
|
||||
if (rps == adev->pm.dpm.boot_ps)
|
||||
printk("b ");
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct drm_crtc *crtc;
|
||||
struct amdgpu_crtc *amdgpu_crtc;
|
||||
u32 line_time_us, vblank_lines;
|
||||
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
|
||||
|
||||
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
|
||||
line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
|
||||
amdgpu_crtc->hw_mode.clock;
|
||||
vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
|
||||
amdgpu_crtc->hw_mode.crtc_vdisplay +
|
||||
(amdgpu_crtc->v_border * 2);
|
||||
vblank_time_us = vblank_lines * line_time_us;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return vblank_time_us;
|
||||
}
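
/*
 * Worked example with illustrative numbers (not taken from the driver):
 * for a CEA 1080p60 mode, crtc_htotal = 2200 and clock = 148500 (kHz)
 * give line_time_us = 2200 * 1000 / 148500 = 14 (integer division);
 * with 45 vblank lines and no borders that is roughly 630 us of vblank
 * per frame for DPM to hide reclocking in.
 */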

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
				break;
			}
		}
	}

	return vrefresh;
}

void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
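
/*
 * Worked example (illustrative values): with i = 30, r_c = 1000 and
 * p_b = 4, i_c = (30 * 1000) / 100 = 300 and 300 >> 4 = 18, which takes
 * five shifts to clear (b_c = 5), so *u = (5 + 1) / 2 = 3 and
 * *p = 300 / (1 << 6) = 4.  The loop is an integer log2 used to split
 * i_c into a mantissa (*p) / exponent (*u) pair.
 */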

int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
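
/*
 * The ATOM tables store each clock as a 16-bit little-endian low word
 * plus an 8-bit high byte, so the loop above rebuilds a 24-bit value in
 * 10 kHz units.  With illustrative raw values usClockLow = 0x86a0 and
 * ucClockHigh = 0x01, clk = 0x86a0 | (0x01 << 16) = 100000, i.e. 1 GHz.
 */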

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}
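
/*
 * Note the fallback in the default case only honors default_gen when the
 * system mask carries the matching speed cap: e.g. default_gen =
 * AMDGPU_PCIE_GEN2 with DRM_PCIE_SPEED_50 set yields GEN2, while any
 * other combination (say GEN3 requested on a slot that only advertises
 * DRM_PCIE_SPEED_50) drops all the way to AMDGPU_PCIE_GEN1.
 */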

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
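
/*
 * encoded_lanes[] maps a lane count to the 3-bit encoding used by the
 * ATOM link-width fields: 1->1, 2->2, 4->3, 8->4, 12->5, 16->6.  Any
 * other width hits a zero-filled gap, so amdgpu_encode_pci_lane_width(8)
 * returns 4 while amdgpu_encode_pci_lane_width(10) returns 0.
 */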

@@ -0,0 +1,85 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__

#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8

/* XXX are these ok? */
#define R600_TEMP_RANGE_MIN (90 * 1000)
#define R600_TEMP_RANGE_MAX (120 * 1000)

#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5

enum amdgpu_td {
	AMDGPU_TD_AUTO,
	AMDGPU_TD_UP,
	AMDGPU_TD_DOWN,
};

enum amdgpu_display_watermark {
	AMDGPU_DISPLAY_WATERMARK_LOW = 0,
	AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
};

enum amdgpu_display_gap
{
	AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
	AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
	AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
	AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
};

void amdgpu_dpm_print_class_info(u32 class, u32 class2);
void amdgpu_dpm_print_cap_info(u32 caps);
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps);
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
bool amdgpu_is_uvd_state(u32 class, u32 class2);
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u);
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);

int amdgpu_get_platform_caps(struct amdgpu_device *adev);

int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
void amdgpu_free_extended_power_table(struct amdgpu_device *adev);

void amdgpu_add_thermal_controller(struct amdgpu_device *adev);

enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen);

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);

#endif

@@ -0,0 +1,439 @@
/**
 * \file amdgpu_drv.c
 * AMD Amdgpu driver
 *
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include "amdgpu_drv.h"

#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include "drm_crtc_helper.h"

#include "amdgpu.h"
#include "amdgpu_irq.h"

/*
 * KMS wrapper.
 * - 3.0.0 - initial driver
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	0
#define KMS_DRIVER_PATCHLEVEL	0

int amdgpu_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
int amdgpu_lockup_timeout = 10000;
int amdgpu_dpm = -1;
int amdgpu_smc_load_fw = 1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
int amdgpu_hard_reset = 0;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = 8;
int amdgpu_vm_block_size = -1;
int amdgpu_exp_hw_support = 0;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, amdgpu_gart_size, int, 0600);

MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);

MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);

MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);

MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);

MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);

MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);

MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);

MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);

MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);

MODULE_PARM_DESC(smc_load_fw, "SMC firmware loading (1 = enable, 0 = disable)");
module_param_named(smc_load_fw, amdgpu_smc_load_fw, int, 0444);

MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);

MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);

MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, amdgpu_hard_reset, int, 0444);

MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);

MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);

MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);

MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);

MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

static struct pci_device_id pciidlist[] = {

	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);

static struct drm_driver kms_driver;

static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
{
	struct apertures_struct *ap;
	bool primary = false;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);

#ifdef CONFIG_X86
	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
	remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
	kfree(ap);

	return 0;
}

static int amdgpu_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	unsigned long flags = ent->driver_data;
	int ret;

	if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n"
			 "See modparam exp_hw_support\n");
		return -ENODEV;
	}

	/* Get rid of things like offb */
	ret = amdgpu_kick_out_firmware_fb(pdev);
	if (ret)
		return ret;

	return drm_get_pci_dev(pdev, ent, &kms_driver);
}

static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int amdgpu_pmops_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return amdgpu_suspend_kms(drm_dev, true, true);
}

static int amdgpu_pmops_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return amdgpu_resume_kms(drm_dev, true, true);
}

static int amdgpu_pmops_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return amdgpu_suspend_kms(drm_dev, false, true);
}

static int amdgpu_pmops_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return amdgpu_resume_kms(drm_dev, false, true);
}

static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (!amdgpu_device_is_px(drm_dev)) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
	drm_kms_helper_poll_disable(drm_dev);
	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);

	ret = amdgpu_suspend_kms(drm_dev, false, false);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_ignore_hotplug(pdev);
	pci_set_power_state(pdev, PCI_D3cold);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;

	return 0;
}

static int amdgpu_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (!amdgpu_device_is_px(drm_dev))
		return -EINVAL;

	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = amdgpu_resume_kms(drm_dev, false, false);
	drm_kms_helper_poll_enable(drm_dev);
	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
	return 0;
}

static int amdgpu_pmops_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_crtc *crtc;

	if (!amdgpu_device_is_px(drm_dev)) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
		if (crtc->enabled) {
			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
			return -EBUSY;
		}
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	return 1;
}

long amdgpu_drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev;
	long ret;
	dev = file_priv->minor->dev;
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_ioctl(filp, cmd, arg);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
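
/*
 * Every ioctl funnels through this wrapper so that a runtime-suspended
 * GPU is woken (pm_runtime_get_sync) before drm_ioctl dispatches the
 * request, and the device is marked busy afterwards so the autosuspend
 * timer restarts.  Skipping this pattern would let userspace touch the
 * registers of a device parked in D3cold by
 * amdgpu_pmops_runtime_suspend() above.
 */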

static const struct dev_pm_ops amdgpu_pm_ops = {
	.suspend = amdgpu_pmops_suspend,
	.resume = amdgpu_pmops_resume,
	.freeze = amdgpu_pmops_freeze,
	.thaw = amdgpu_pmops_thaw,
	.poweroff = amdgpu_pmops_freeze,
	.restore = amdgpu_pmops_resume,
	.runtime_suspend = amdgpu_pmops_runtime_suspend,
	.runtime_resume = amdgpu_pmops_runtime_resume,
	.runtime_idle = amdgpu_pmops_runtime_idle,
};

static const struct file_operations amdgpu_driver_kms_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = amdgpu_drm_ioctl,
	.mmap = amdgpu_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
};

static struct drm_driver kms_driver = {
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
	    DRIVER_PRIME | DRIVER_RENDER,
	.dev_priv_size = 0,
	.load = amdgpu_driver_load_kms,
	.open = amdgpu_driver_open_kms,
	.preclose = amdgpu_driver_preclose_kms,
	.postclose = amdgpu_driver_postclose_kms,
	.lastclose = amdgpu_driver_lastclose_kms,
	.set_busid = drm_pci_set_busid,
	.unload = amdgpu_driver_unload_kms,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
	.get_vblank_timestamp = amdgpu_get_vblank_timestamp_kms,
	.get_scanout_position = amdgpu_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = amdgpu_debugfs_init,
	.debugfs_cleanup = amdgpu_debugfs_cleanup,
#endif
	.irq_preinstall = amdgpu_irq_preinstall,
	.irq_postinstall = amdgpu_irq_postinstall,
	.irq_uninstall = amdgpu_irq_uninstall,
	.irq_handler = amdgpu_irq_handler,
	.ioctls = amdgpu_ioctls_kms,
	.gem_free_object = amdgpu_gem_object_free,
	.gem_open_object = amdgpu_gem_object_open,
	.gem_close_object = amdgpu_gem_object_close,
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &amdgpu_driver_kms_fops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = amdgpu_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = amdgpu_gem_prime_pin,
	.gem_prime_unpin = amdgpu_gem_prime_unpin,
	.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
	.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
	.gem_prime_vmap = amdgpu_gem_prime_vmap,
	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};

static struct drm_driver *driver;
static struct pci_driver *pdriver;

static struct pci_driver amdgpu_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = amdgpu_pci_probe,
	.remove = amdgpu_pci_remove,
	.driver.pm = &amdgpu_pm_ops,
};

static int __init amdgpu_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force()) {
		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
		return -EINVAL;
	}
#endif
	DRM_INFO("amdgpu kernel modesetting enabled.\n");
	driver = &kms_driver;
	pdriver = &amdgpu_kms_pci_driver;
	driver->driver_features |= DRIVER_MODESET;
	driver->num_ioctls = amdgpu_max_kms_ioctl;
	amdgpu_register_atpx_handler();

	/* let modprobe override vga console setting */
	return drm_pci_init(driver, pdriver);
}

static void __exit amdgpu_exit(void)
{
	drm_pci_exit(driver, pdriver);
	amdgpu_unregister_atpx_handler();
}

module_init(amdgpu_init);
module_exit(amdgpu_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
@ -0,0 +1,48 @@
|
|||
/* amdgpu_drv.h -- Private header for amdgpu driver -*- linux-c -*-
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_DRV_H__
|
||||
#define __AMDGPU_DRV_H__
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "amdgpu_family.h"
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
|
||||
#define DRIVER_AUTHOR "AMD linux driver team"
|
||||
|
||||
#define DRIVER_NAME "amdgpu"
|
||||
#define DRIVER_DESC "AMD GPU"
|
||||
#define DRIVER_DATE "20150101"
|
||||
|
||||
long amdgpu_drm_ioctl(struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
#endif
|
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
* Copyright 2007-8 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_connectors.h"
|
||||
#include "atom.h"
|
||||
#include "atombios_encoders.h"
|
||||
|
||||
void
|
||||
amdgpu_link_encoder_connector(struct drm_device *dev)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct drm_connector *connector;
|
||||
struct amdgpu_connector *amdgpu_connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct amdgpu_encoder *amdgpu_encoder;
|
||||
|
||||
/* walk the list and link encoders to connectors */
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
amdgpu_connector = to_amdgpu_connector(connector);
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
if (amdgpu_encoder->devices & amdgpu_connector->devices) {
|
||||
drm_mode_connector_attach_encoder(connector, encoder);
|
||||
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
|
||||
adev->mode_info.bl_encoder = amdgpu_encoder;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
if (connector->encoder == encoder) {
|
||||
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
||||
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
|
||||
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
|
||||
amdgpu_encoder->active_device, amdgpu_encoder->devices,
|
||||
amdgpu_connector->devices, encoder->encoder_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct drm_connector *
|
||||
amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
struct drm_connector *connector;
|
||||
struct amdgpu_connector *amdgpu_connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
amdgpu_connector = to_amdgpu_connector(connector);
|
||||
if (amdgpu_encoder->active_device & amdgpu_connector->devices)
|
||||
return connector;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct drm_connector *
|
||||
amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
struct drm_connector *connector;
|
||||
struct amdgpu_connector *amdgpu_connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
amdgpu_connector = to_amdgpu_connector(connector);
|
||||
if (amdgpu_encoder->devices & amdgpu_connector->devices)
|
||||
return connector;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
struct drm_encoder *other_encoder;
|
||||
struct amdgpu_encoder *other_amdgpu_encoder;
|
||||
|
||||
if (amdgpu_encoder->is_ext_encoder)
|
||||
return NULL;
|
||||
|
||||
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (other_encoder == encoder)
|
||||
continue;
|
||||
other_amdgpu_encoder = to_amdgpu_encoder(other_encoder);
|
||||
if (other_amdgpu_encoder->is_ext_encoder &&
|
||||
(amdgpu_encoder->devices & other_amdgpu_encoder->devices))
|
||||
return other_encoder;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_encoder *other_encoder = amdgpu_get_external_encoder(encoder);
|
||||
|
||||
if (other_encoder) {
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(other_encoder);
|
||||
|
||||
switch (amdgpu_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_TRAVIS:
|
||||
case ENCODER_OBJECT_ID_NUTMEG:
|
||||
return amdgpu_encoder->encoder_id;
|
||||
default:
|
||||
return ENCODER_OBJECT_ID_NONE;
|
||||
}
|
||||
}
|
||||
return ENCODER_OBJECT_ID_NONE;
|
||||
}
|
||||
|
||||
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
|
||||
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
|
||||
unsigned hblank = native_mode->htotal - native_mode->hdisplay;
|
||||
unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
|
||||
unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
|
||||
unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
|
||||
unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
|
||||
unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
|
||||
|
||||
adjusted_mode->clock = native_mode->clock;
|
||||
adjusted_mode->flags = native_mode->flags;
|
||||
|
||||
adjusted_mode->hdisplay = native_mode->hdisplay;
|
||||
adjusted_mode->vdisplay = native_mode->vdisplay;
|
||||
|
||||
adjusted_mode->htotal = native_mode->hdisplay + hblank;
|
||||
adjusted_mode->hsync_start = native_mode->hdisplay + hover;
|
||||
adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
|
||||
|
||||
adjusted_mode->vtotal = native_mode->vdisplay + vblank;
|
||||
adjusted_mode->vsync_start = native_mode->vdisplay + vover;
|
||||
adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
|
||||
|
||||
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
|
||||
|
||||
adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
|
||||
adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
|
||||
|
||||
adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
|
||||
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
|
||||
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
|
||||
|
||||
adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
|
||||
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
|
||||
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
|
||||
|
||||
}
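/*
 * A worked example (not from the original patch): for a 1920x1080 CEA
 * panel with native htotal 2200, hsync_start 2008 and hsync_end 2052,
 * the code above derives hblank = 2200 - 1920 = 280, hover = 2008 - 1920
 * = 88 and hsync_width = 2052 - 2008 = 44, so whatever mode was
 * requested is rewritten to the panel's full native timing.
 */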
|
||||
|
||||
bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
|
||||
u32 pixel_clock)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
struct amdgpu_connector *amdgpu_connector;
|
||||
struct amdgpu_connector_atom_dig *dig_connector;
|
||||
|
||||
connector = amdgpu_get_connector_for_encoder(encoder);
|
||||
/* if we don't have an active device yet, just use one of
|
||||
* the connectors tied to the encoder.
|
||||
*/
|
||||
if (!connector)
|
||||
connector = amdgpu_get_connector_for_encoder_init(encoder);
|
||||
amdgpu_connector = to_amdgpu_connector(connector);
|
||||
|
||||
switch (connector->connector_type) {
|
||||
case DRM_MODE_CONNECTOR_DVII:
|
||||
case DRM_MODE_CONNECTOR_HDMIB:
|
||||
if (amdgpu_connector->use_digital) {
|
||||
/* HDMI 1.3 supports up to 340 MHz over a single link */
|
||||
if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
|
||||
if (pixel_clock > 340000)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
} else {
|
||||
if (pixel_clock > 165000)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
} else
|
||||
return false;
|
||||
case DRM_MODE_CONNECTOR_DVID:
|
||||
case DRM_MODE_CONNECTOR_HDMIA:
|
||||
case DRM_MODE_CONNECTOR_DisplayPort:
|
||||
dig_connector = amdgpu_connector->con_priv;
|
||||
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
|
||||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
|
||||
return false;
|
||||
else {
|
||||
/* HDMI 1.3 supports up to 340 MHz over a single link */
|
||||
if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
|
||||
if (pixel_clock > 340000)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
} else {
|
||||
if (pixel_clock > 165000)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
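/*
 * A worked example (not from the original patch): single-link TMDS tops
 * out at a 165 MHz pixel clock, so 1920x1200@60 with reduced blanking
 * (~154 MHz) fits on one link while 2560x1600@60 (~268 MHz) needs both;
 * an HDMI 1.3 sink raises the single-link limit to 340 MHz, hence the
 * 165000 and 340000 kHz thresholds above.
 */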
|
|
@ -0,0 +1,432 @@
|
|||
/*
|
||||
* Copyright © 2007 David Airlie
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* David Airlie
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/fb.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
|
||||
#include <drm/drm_fb_helper.h>
|
||||
|
||||
#include <linux/vga_switcheroo.h>
|
||||
|
||||
/* object hierarchy -
|
||||
this contains a helper + an amdgpu fb
|
||||
the helper contains a pointer to the amdgpu framebuffer baseclass.
|
||||
*/
|
||||
struct amdgpu_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct amdgpu_framebuffer rfb;
|
||||
struct list_head fbdev_list;
|
||||
struct amdgpu_device *adev;
|
||||
};
|
||||
|
||||
static struct fb_ops amdgpufb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par,
|
||||
.fb_fillrect = cfb_fillrect,
|
||||
.fb_copyarea = cfb_copyarea,
|
||||
.fb_imageblit = cfb_imageblit,
|
||||
.fb_pan_display = drm_fb_helper_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
.fb_debug_enter = drm_fb_helper_debug_enter,
|
||||
.fb_debug_leave = drm_fb_helper_debug_leave,
|
||||
};
|
||||
|
||||
|
||||
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
|
||||
{
|
||||
int aligned = width;
|
||||
int pitch_mask = 0;
|
||||
|
||||
switch (bpp / 8) {
|
||||
case 1:
|
||||
pitch_mask = 255;
|
||||
break;
|
||||
case 2:
|
||||
pitch_mask = 127;
|
||||
break;
|
||||
case 3:
|
||||
case 4:
|
||||
pitch_mask = 63;
|
||||
break;
|
||||
}
|
||||
|
||||
aligned += pitch_mask;
|
||||
aligned &= ~pitch_mask;
|
||||
return aligned;
|
||||
}
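/*
 * A worked example (not from the original patch): a 1366 pixel wide,
 * 32 bpp scanout uses pitch_mask = 63, so (1366 + 63) & ~63 = 1408
 * pixels per line; the caller below then multiplies by the pixel size
 * in bytes to get the byte pitch.
 */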
|
||||
|
||||
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
|
||||
int ret;
|
||||
|
||||
ret = amdgpu_bo_reserve(rbo, false);
|
||||
if (likely(ret == 0)) {
|
||||
amdgpu_bo_kunmap(rbo);
|
||||
amdgpu_bo_unpin(rbo);
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
}
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
}
|
||||
|
||||
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object **gobj_p)
|
||||
{
|
||||
struct amdgpu_device *adev = rfbdev->adev;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct amdgpu_bo *rbo = NULL;
|
||||
bool fb_tiled = false; /* useful for testing */
|
||||
u32 tiling_flags = 0;
|
||||
int ret;
|
||||
int aligned_size, size;
|
||||
int height = mode_cmd->height;
|
||||
u32 bpp, depth;
|
||||
|
||||
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
|
||||
|
||||
/* need to align pitch with crtc limits */
|
||||
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
|
||||
fb_tiled) * ((bpp + 1) / 8);
|
||||
|
||||
height = ALIGN(mode_cmd->height, 8);
|
||||
size = mode_cmd->pitches[0] * height;
|
||||
aligned_size = ALIGN(size, PAGE_SIZE);
|
||||
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
0, true,
|
||||
&gobj);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
|
||||
aligned_size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rbo = gem_to_amdgpu_bo(gobj);
|
||||
|
||||
if (fb_tiled)
|
||||
tiling_flags = AMDGPU_TILING_MACRO;
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
switch (bpp) {
|
||||
case 32:
|
||||
tiling_flags |= AMDGPU_TILING_SWAP_32BIT;
|
||||
break;
|
||||
case 16:
|
||||
tiling_flags |= AMDGPU_TILING_SWAP_16BIT;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
ret = amdgpu_bo_reserve(rbo, false);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unref;
|
||||
|
||||
if (tiling_flags) {
|
||||
ret = amdgpu_bo_set_tiling_flags(rbo,
|
||||
tiling_flags | AMDGPU_TILING_SURFACE);
|
||||
if (ret)
|
||||
dev_err(adev->dev, "FB failed to set tiling flags\n");
|
||||
}
|
||||
|
||||
|
||||
ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL);
|
||||
if (ret) {
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
goto out_unref;
|
||||
}
|
||||
ret = amdgpu_bo_kmap(rbo, NULL);
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
if (ret) {
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
*gobj_p = gobj;
|
||||
return 0;
|
||||
out_unref:
|
||||
amdgpufb_destroy_pinned_object(gobj);
|
||||
*gobj_p = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amdgpufb_create(struct drm_fb_helper *helper,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
|
||||
struct amdgpu_device *adev = rfbdev->adev;
|
||||
struct fb_info *info;
|
||||
struct drm_framebuffer *fb = NULL;
|
||||
struct drm_mode_fb_cmd2 mode_cmd;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct amdgpu_bo *rbo = NULL;
|
||||
struct device *device = &adev->pdev->dev;
|
||||
int ret;
|
||||
unsigned long tmp;
|
||||
|
||||
mode_cmd.width = sizes->surface_width;
|
||||
mode_cmd.height = sizes->surface_height;
|
||||
|
||||
if (sizes->surface_bpp == 24)
|
||||
sizes->surface_bpp = 32;
|
||||
|
||||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
|
||||
sizes->surface_depth);
|
||||
|
||||
ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create fbcon object %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
rbo = gem_to_amdgpu_bo(gobj);
|
||||
|
||||
/* okay, we have an object, now allocate the framebuffer */
|
||||
info = framebuffer_alloc(0, device);
|
||||
if (info == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
info->par = rfbdev;
|
||||
|
||||
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
fb = &rfbdev->rfb.base;
|
||||
|
||||
/* setup helper */
|
||||
rfbdev->helper.fb = fb;
|
||||
rfbdev->helper.fbdev = info;
|
||||
|
||||
memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
|
||||
|
||||
strcpy(info->fix.id, "amdgpudrmfb");
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
|
||||
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
|
||||
info->fbops = &amdgpufb_ops;
|
||||
|
||||
tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
|
||||
info->fix.smem_start = adev->mc.aper_base + tmp;
|
||||
info->fix.smem_len = amdgpu_bo_size(rbo);
|
||||
info->screen_base = rbo->kptr;
|
||||
info->screen_size = amdgpu_bo_size(rbo);
|
||||
|
||||
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
info->apertures = alloc_apertures(1);
|
||||
if (!info->apertures) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
|
||||
info->apertures->ranges[0].size = adev->mc.aper_size;
|
||||
|
||||
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
|
||||
|
||||
if (info->screen_base == NULL) {
|
||||
ret = -ENOSPC;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
ret = fb_alloc_cmap(&info->cmap, 256, 0);
|
||||
if (ret) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
|
||||
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
|
||||
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
|
||||
DRM_INFO("fb depth is %d\n", fb->depth);
|
||||
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
|
||||
|
||||
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
|
||||
return 0;
|
||||
|
||||
out_unref:
|
||||
if (rbo) {
|
||||
|
||||
}
|
||||
if (fb && ret) {
|
||||
drm_gem_object_unreference(gobj);
|
||||
drm_framebuffer_unregister_private(fb);
|
||||
drm_framebuffer_cleanup(fb);
|
||||
kfree(fb);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.rfbdev)
|
||||
drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
|
||||
}
|
||||
|
||||
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
|
||||
{
|
||||
struct fb_info *info;
|
||||
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
|
||||
|
||||
if (rfbdev->helper.fbdev) {
|
||||
info = rfbdev->helper.fbdev;
|
||||
|
||||
unregister_framebuffer(info);
|
||||
if (info->cmap.len)
|
||||
fb_dealloc_cmap(&info->cmap);
|
||||
framebuffer_release(info);
|
||||
}
|
||||
|
||||
if (rfb->obj) {
|
||||
amdgpufb_destroy_pinned_object(rfb->obj);
|
||||
rfb->obj = NULL;
|
||||
}
|
||||
drm_fb_helper_fini(&rfbdev->helper);
|
||||
drm_framebuffer_unregister_private(&rfb->base);
|
||||
drm_framebuffer_cleanup(&rfb->base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Sets the color ramps on behalf of fbcon */
|
||||
static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
||||
u16 blue, int regno)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
amdgpu_crtc->lut_r[regno] = red >> 6;
|
||||
amdgpu_crtc->lut_g[regno] = green >> 6;
|
||||
amdgpu_crtc->lut_b[regno] = blue >> 6;
|
||||
}
|
||||
|
||||
/** Gets the color ramps on behalf of fbcon */
|
||||
static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, int regno)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
*red = amdgpu_crtc->lut_r[regno] << 6;
|
||||
*green = amdgpu_crtc->lut_g[regno] << 6;
|
||||
*blue = amdgpu_crtc->lut_b[regno] << 6;
|
||||
}
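/*
 * A note on the shifts above (not from the original patch): fbcon hands
 * the driver 16-bit colour components while the hardware LUT entries
 * are 10 bits wide, so 0xffff >> 6 = 0x3ff (the LUT maximum) and
 * 0x3ff << 6 = 0xffc0; a round trip only loses the low 6 bits.
 */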
|
||||
|
||||
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
|
||||
.gamma_set = amdgpu_crtc_fb_gamma_set,
|
||||
.gamma_get = amdgpu_crtc_fb_gamma_get,
|
||||
.fb_probe = amdgpufb_create,
|
||||
};
|
||||
|
||||
int amdgpu_fbdev_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_fbdev *rfbdev;
|
||||
int bpp_sel = 32;
|
||||
int ret;
|
||||
|
||||
/* don't init fbdev on hw without DCE */
|
||||
if (!adev->mode_info.mode_config_initialized)
|
||||
return 0;
|
||||
|
||||
/* select 8 bpp console on low vram cards */
|
||||
if (adev->mc.real_vram_size <= (32*1024*1024))
|
||||
bpp_sel = 8;
|
||||
|
||||
rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
|
||||
if (!rfbdev)
|
||||
return -ENOMEM;
|
||||
|
||||
rfbdev->adev = adev;
|
||||
adev->mode_info.rfbdev = rfbdev;
|
||||
|
||||
drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
|
||||
&amdgpu_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
|
||||
adev->mode_info.num_crtc,
|
||||
AMDGPUFB_CONN_LIMIT);
|
||||
if (ret) {
|
||||
kfree(rfbdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
|
||||
|
||||
/* disable all the possible outputs/crtcs before entering KMS mode */
|
||||
drm_helper_disable_unused_functions(adev->ddev);
|
||||
|
||||
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_fbdev_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (!adev->mode_info.rfbdev)
|
||||
return;
|
||||
|
||||
amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
|
||||
kfree(adev->mode_info.rfbdev);
|
||||
adev->mode_info.rfbdev = NULL;
|
||||
}
|
||||
|
||||
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
|
||||
{
|
||||
if (adev->mode_info.rfbdev)
|
||||
fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
|
||||
}
|
||||
|
||||
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_bo *robj;
|
||||
int size = 0;
|
||||
|
||||
if (!adev->mode_info.rfbdev)
|
||||
return 0;
|
||||
|
||||
robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
|
||||
size += amdgpu_bo_size(robj);
|
||||
return size;
|
||||
}
|
||||
|
||||
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
|
||||
{
|
||||
if (!adev->mode_info.rfbdev)
|
||||
return false;
|
||||
if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
|
||||
return true;
|
||||
return false;
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,371 @@
|
|||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
|
||||
/*
|
||||
* GART
|
||||
* The GART (Graphics Aperture Remapping Table) is an aperture
|
||||
* in the GPU's address space. System pages can be mapped into
|
||||
* the aperture and look like contiguous pages from the GPU's
|
||||
* perspective. A page table maps the pages in the aperture
|
||||
* to the actual backing pages in system memory.
|
||||
*
|
||||
* Radeon GPUs support both an internal GART, as described above,
|
||||
* and AGP. AGP works similarly, but the GART table is configured
|
||||
* and maintained by the northbridge rather than the driver.
|
||||
* Radeon hw has a separate AGP aperture that is programmed to
|
||||
* point to the AGP aperture provided by the northbridge and the
|
||||
* requests are passed through to the northbridge aperture.
|
||||
* Both AGP and internal GART can be used at the same time, however
|
||||
* that is not currently supported by the driver.
|
||||
*
|
||||
* This file handles the common internal GART management.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Common GART table functions.
|
||||
*/
|
||||
/**
|
||||
* amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Allocate system memory for GART page table
|
||||
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
|
||||
* gart table to be in system memory.
|
||||
* Returns 0 for success, -ENOMEM for failure.
|
||||
*/
|
||||
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
|
||||
{
|
||||
void *ptr;
|
||||
|
||||
ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
|
||||
&adev->gart.table_addr);
|
||||
if (ptr == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef CONFIG_X86
|
||||
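/* intentionally compiled out: would map the GART table uncached on x86 */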
if (0) {
|
||||
set_memory_uc((unsigned long)ptr,
|
||||
adev->gart.table_size >> PAGE_SHIFT);
|
||||
}
|
||||
#endif
|
||||
adev->gart.ptr = ptr;
|
||||
memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_table_ram_free - free system ram for gart page table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Free system memory for GART page table
|
||||
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
|
||||
* gart table to be in system memory.
|
||||
*/
|
||||
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gart.ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
#ifdef CONFIG_X86
|
||||
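/* intentionally compiled out: pairs with the disabled set_memory_uc() above */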
if (0) {
|
||||
set_memory_wb((unsigned long)adev->gart.ptr,
|
||||
adev->gart.table_size >> PAGE_SHIFT);
|
||||
}
|
||||
#endif
|
||||
pci_free_consistent(adev->pdev, adev->gart.table_size,
|
||||
(void *)adev->gart.ptr,
|
||||
adev->gart.table_addr);
|
||||
adev->gart.ptr = NULL;
|
||||
adev->gart.table_addr = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_table_vram_alloc - allocate vram for gart page table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Allocate video memory for GART page table
|
||||
* (pcie r4xx, r5xx+). These asics require the
|
||||
* gart table to be in video memory.
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gart.robj == NULL) {
|
||||
r = amdgpu_bo_create(adev, adev->gart.table_size,
|
||||
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
|
||||
NULL, &adev->gart.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_table_vram_pin - pin gart page table in vram
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Pin the GART page table in vram so it will not be moved
|
||||
* by the memory manager (pcie r4xx, r5xx+). These asics require the
|
||||
* gart table to be in video memory.
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
|
||||
{
|
||||
uint64_t gpu_addr;
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gart.robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = amdgpu_bo_pin(adev->gart.robj,
|
||||
AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gart.robj);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
|
||||
if (r)
|
||||
amdgpu_bo_unpin(adev->gart.robj);
|
||||
amdgpu_bo_unreserve(adev->gart.robj);
|
||||
adev->gart.table_addr = gpu_addr;
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_table_vram_unpin - unpin gart page table in vram
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Unpin the GART page table in vram (pcie r4xx, r5xx+).
|
||||
* These asics require the gart table to be in video memory.
|
||||
*/
|
||||
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gart.robj == NULL) {
|
||||
return;
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->gart.robj, false);
|
||||
if (likely(r == 0)) {
|
||||
amdgpu_bo_kunmap(adev->gart.robj);
|
||||
amdgpu_bo_unpin(adev->gart.robj);
|
||||
amdgpu_bo_unreserve(adev->gart.robj);
|
||||
adev->gart.ptr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_table_vram_free - free gart page table vram
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Free the video memory used for the GART page table
|
||||
* (pcie r4xx, r5xx+). These asics require the gart table to
|
||||
* be in video memory.
|
||||
*/
|
||||
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gart.robj == NULL) {
|
||||
return;
|
||||
}
|
||||
amdgpu_bo_unref(&adev->gart.robj);
|
||||
}
|
||||
|
||||
/*
|
||||
* Common gart functions.
|
||||
*/
|
||||
/**
|
||||
* amdgpu_gart_unbind - unbind pages from the gart page table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @offset: offset into the GPU's gart aperture
|
||||
* @pages: number of pages to unbind
|
||||
*
|
||||
* Unbinds the requested pages from the gart page table and
|
||||
* replaces them with the dummy page (all asics).
|
||||
*/
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
||||
int pages)
|
||||
{
|
||||
unsigned t;
|
||||
unsigned p;
|
||||
int i, j;
|
||||
u64 page_base;
|
||||
uint32_t flags = AMDGPU_PTE_SYSTEM;
|
||||
|
||||
if (!adev->gart.ready) {
|
||||
WARN(1, "trying to unbind memory from uninitialized GART !\n");
|
||||
return;
|
||||
}
|
||||
|
||||
t = offset / AMDGPU_GPU_PAGE_SIZE;
|
||||
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
|
||||
for (i = 0; i < pages; i++, p++) {
|
||||
if (adev->gart.pages[p]) {
|
||||
adev->gart.pages[p] = NULL;
|
||||
adev->gart.pages_addr[p] = adev->dummy_page.addr;
|
||||
page_base = adev->gart.pages_addr[p];
|
||||
if (!adev->gart.ptr)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
|
||||
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
|
||||
t, page_base, flags);
|
||||
page_base += AMDGPU_GPU_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
mb();
|
||||
amdgpu_gart_flush_gpu_tlb(adev, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_bind - bind pages into the gart page table
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @offset: offset into the GPU's gart aperture
|
||||
* @pages: number of pages to bind
|
||||
* @pagelist: pages to bind
|
||||
* @dma_addr: DMA addresses of pages
|
||||
*
|
||||
* Binds the requested pages to the gart page table
|
||||
* (all asics).
|
||||
* Returns 0 for success, -EINVAL for failure.
|
||||
*/
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
|
||||
int pages, struct page **pagelist, dma_addr_t *dma_addr,
|
||||
uint32_t flags)
|
||||
{
|
||||
unsigned t;
|
||||
unsigned p;
|
||||
uint64_t page_base;
|
||||
int i, j;
|
||||
|
||||
if (!adev->gart.ready) {
|
||||
WARN(1, "trying to bind memory to uninitialized GART !\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
t = offset / AMDGPU_GPU_PAGE_SIZE;
|
||||
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
|
||||
|
||||
for (i = 0; i < pages; i++, p++) {
|
||||
adev->gart.pages_addr[p] = dma_addr[i];
|
||||
adev->gart.pages[p] = pagelist[i];
|
||||
if (adev->gart.ptr) {
|
||||
page_base = adev->gart.pages_addr[p];
|
||||
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
|
||||
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
|
||||
page_base += AMDGPU_GPU_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
mb();
|
||||
amdgpu_gart_flush_gpu_tlb(adev, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gart_init - init the driver info for managing the gart
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Allocate the dummy page and init the gart driver info (all asics).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_gart_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r, i;
|
||||
|
||||
if (adev->gart.pages) {
|
||||
return 0;
|
||||
}
|
||||
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
|
||||
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
|
||||
DRM_ERROR("Page size is smaller than GPU page size!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
r = amdgpu_dummy_page_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
/* Compute table size */
|
||||
adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
|
||||
adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
|
||||
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
|
||||
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
|
||||
/* Allocate pages table */
|
||||
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
|
||||
if (adev->gart.pages == NULL) {
|
||||
amdgpu_gart_fini(adev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
|
||||
adev->gart.num_cpu_pages);
|
||||
if (adev->gart.pages_addr == NULL) {
|
||||
amdgpu_gart_fini(adev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* set GART entry to point to the dummy page by default */
|
||||
for (i = 0; i < adev->gart.num_cpu_pages; i++) {
|
||||
adev->gart.pages_addr[i] = adev->dummy_page.addr;
|
||||
}
|
||||
return 0;
|
||||
}
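/*
 * A quick illustration (not from the original patch): with a 1 GiB GTT
 * and 4 KiB pages on both the CPU and GPU side, num_cpu_pages and
 * num_gpu_pages are both 262144, and the pages and pages_addr tables
 * each take about 2 MiB of vmalloc space on a 64-bit kernel.
 */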
|
||||
|
||||
/**
|
||||
* amdgpu_gart_fini - tear down the driver info for managing the gart
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Tear down the gart driver info and free the dummy page (all asics).
|
||||
*/
|
||||
void amdgpu_gart_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
|
||||
/* unbind pages */
|
||||
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
|
||||
}
|
||||
adev->gart.ready = false;
|
||||
vfree(adev->gart.pages);
|
||||
vfree(adev->gart.pages_addr);
|
||||
adev->gart.pages = NULL;
|
||||
adev->gart.pages_addr = NULL;
|
||||
|
||||
amdgpu_dummy_page_fini(adev);
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_GDS_H__
|
||||
#define __AMDGPU_GDS_H__
|
||||
|
||||
/* Because TTM requires that allocated buffers be PAGE_SIZE aligned,
|
||||
* we should report the GDS/GWS/OA sizes as PAGE_SIZE aligned
|
||||
*/
|
||||
#define AMDGPU_GDS_SHIFT 2
|
||||
#define AMDGPU_GWS_SHIFT PAGE_SHIFT
|
||||
#define AMDGPU_OA_SHIFT PAGE_SHIFT
|
||||
|
||||
#define AMDGPU_PL_GDS TTM_PL_PRIV0
|
||||
#define AMDGPU_PL_GWS TTM_PL_PRIV1
|
||||
#define AMDGPU_PL_OA TTM_PL_PRIV2
|
||||
|
||||
#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
|
||||
#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
|
||||
#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
|
||||
|
||||
struct amdgpu_ring;
|
||||
struct amdgpu_bo;
|
||||
|
||||
struct amdgpu_gds_asic_info {
|
||||
uint32_t total_size;
|
||||
uint32_t gfx_partition_size;
|
||||
uint32_t cs_partition_size;
|
||||
};
|
||||
|
||||
struct amdgpu_gds {
|
||||
struct amdgpu_gds_asic_info mem;
|
||||
struct amdgpu_gds_asic_info gws;
|
||||
struct amdgpu_gds_asic_info oa;
|
||||
/* At present, GDS, GWS and OA resources for gfx (graphics)
|
||||
* is always pre-allocated and available for graphics operation.
|
||||
* Such resource is shared between all gfx clients.
|
||||
* TODO: move this operation to user space
|
||||
* */
|
||||
struct amdgpu_bo* gds_gfx_bo;
|
||||
struct amdgpu_bo* gws_gfx_bo;
|
||||
struct amdgpu_bo* oa_gfx_bo;
|
||||
};
|
||||
|
||||
struct amdgpu_gds_reg_offset {
|
||||
uint32_t mem_base;
|
||||
uint32_t mem_size;
|
||||
uint32_t gws;
|
||||
uint32_t oa;
|
||||
};
|
||||
|
||||
#endif /* __AMDGPU_GDS_H__ */
|
|
@ -0,0 +1,735 @@
|
|||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <linux/ktime.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
|
||||
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
|
||||
|
||||
if (robj) {
|
||||
if (robj->gem_base.import_attach)
|
||||
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
|
||||
amdgpu_bo_unref(&robj);
|
||||
}
|
||||
}
|
||||
|
||||
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
|
||||
int alignment, u32 initial_domain,
|
||||
u64 flags, bool kernel,
|
||||
struct drm_gem_object **obj)
|
||||
{
|
||||
struct amdgpu_bo *robj;
|
||||
unsigned long max_size;
|
||||
int r;
|
||||
|
||||
*obj = NULL;
|
||||
/* At least align on page size */
|
||||
if (alignment < PAGE_SIZE) {
|
||||
alignment = PAGE_SIZE;
|
||||
}
|
||||
|
||||
if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
|
||||
/* Maximum bo size is the unpinned gtt size since we use the gtt to
|
||||
* handle vram to system pool migrations.
|
||||
*/
|
||||
max_size = adev->mc.gtt_size - adev->gart_pin_size;
|
||||
if (size > max_size) {
|
||||
DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
|
||||
size >> 20, max_size >> 20);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
retry:
|
||||
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
|
||||
if (r) {
|
||||
if (r != -ERESTARTSYS) {
|
||||
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
|
||||
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
|
||||
goto retry;
|
||||
}
|
||||
DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
|
||||
size, initial_domain, alignment, r);
|
||||
}
|
||||
return r;
|
||||
}
|
||||
*obj = &robj->gem_base;
|
||||
robj->pid = task_pid_nr(current);
|
||||
|
||||
mutex_lock(&adev->gem.mutex);
|
||||
list_add_tail(&robj->list, &adev->gem.objects);
|
||||
mutex_unlock(&adev->gem.mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_gem_init(struct amdgpu_device *adev)
|
||||
{
|
||||
INIT_LIST_HEAD(&adev->gem.objects);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_gem_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_bo_force_delete(adev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from drm_gem_handle_create, which appears in both the new and
|
||||
* open ioctl cases.
|
||||
*/
|
||||
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
|
||||
{
|
||||
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = rbo->adev;
|
||||
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_reserve(rbo, false);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
bo_va = amdgpu_vm_bo_find(vm, rbo);
|
||||
if (!bo_va) {
|
||||
bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
|
||||
} else {
|
||||
++bo_va->ref_count;
|
||||
}
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_gem_object_close(struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = rbo->adev;
|
||||
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_reserve(rbo, true);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "leaking bo va because "
|
||||
"we fail to reserve bo (%d)\n", r);
|
||||
return;
|
||||
}
|
||||
bo_va = amdgpu_vm_bo_find(vm, rbo);
|
||||
if (bo_va) {
|
||||
if (--bo_va->ref_count == 0) {
|
||||
amdgpu_vm_bo_rmv(adev, bo_va);
|
||||
}
|
||||
}
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
}
|
||||
|
||||
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
|
||||
{
|
||||
if (r == -EDEADLK) {
|
||||
r = amdgpu_gpu_reset(adev);
|
||||
if (!r)
|
||||
r = -EAGAIN;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* GEM ioctls.
|
||||
*/
|
||||
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
union drm_amdgpu_gem_create *args = data;
|
||||
uint64_t size = args->in.bo_size;
|
||||
struct drm_gem_object *gobj;
|
||||
uint32_t handle;
|
||||
bool kernel = false;
|
||||
int r;
|
||||
|
||||
down_read(&adev->exclusive_lock);
|
||||
/* create a gem object to contain this object in */
|
||||
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
|
||||
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
|
||||
kernel = true;
|
||||
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
|
||||
size = size << AMDGPU_GDS_SHIFT;
|
||||
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
|
||||
size = size << AMDGPU_GWS_SHIFT;
|
||||
else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
|
||||
size = size << AMDGPU_OA_SHIFT;
|
||||
else {
|
||||
r = -EINVAL;
|
||||
goto error_unlock;
|
||||
}
|
||||
}
|
||||
size = roundup(size, PAGE_SIZE);
|
||||
|
||||
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
|
||||
(u32)(0xffffffff & args->in.domains),
|
||||
args->in.domain_flags,
|
||||
kernel, &gobj);
|
||||
if (r)
|
||||
goto error_unlock;
|
||||
|
||||
r = drm_gem_handle_create(filp, gobj, &handle);
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
if (r)
|
||||
goto error_unlock;
|
||||
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.handle = handle;
|
||||
up_read(&adev->exclusive_lock);
|
||||
return 0;
|
||||
|
||||
error_unlock:
|
||||
up_read(&adev->exclusive_lock);
|
||||
r = amdgpu_gem_handle_lockup(adev, r);
|
||||
return r;
|
||||
}
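/*
 * A quick illustration (not from the original patch): for the special
 * domains above, bo_size is an entry count rather than a byte count, so
 * a GDS request with bo_size 128 becomes 128 << AMDGPU_GDS_SHIFT = 512
 * bytes, which roundup(size, PAGE_SIZE) then pads to one full page.
 */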
|
||||
|
||||
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct drm_amdgpu_gem_userptr *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_bo *bo;
|
||||
uint32_t handle;
|
||||
int r;
|
||||
|
||||
if (offset_in_page(args->addr | args->size))
|
||||
return -EINVAL;
|
||||
|
||||
/* reject unknown flag values */
|
||||
if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
|
||||
AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
|
||||
AMDGPU_GEM_USERPTR_REGISTER))
|
||||
return -EINVAL;
|
||||
|
||||
if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
|
||||
!(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
|
||||
|
||||
/* if we want to write to it we must require anonymous
|
||||
memory and install an MMU notifier */
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
down_read(&adev->exclusive_lock);
|
||||
|
||||
/* create a gem object to contain this object in */
|
||||
r = amdgpu_gem_object_create(adev, args->size, 0,
|
||||
AMDGPU_GEM_DOMAIN_CPU, 0,
|
||||
0, &gobj);
|
||||
if (r)
|
||||
goto handle_lockup;
|
||||
|
||||
bo = gem_to_amdgpu_bo(gobj);
|
||||
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
|
||||
if (r)
|
||||
goto release_object;
|
||||
|
||||
if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
|
||||
r = amdgpu_mn_register(bo, args->addr);
|
||||
if (r)
|
||||
goto release_object;
|
||||
}
|
||||
|
||||
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
r = amdgpu_bo_reserve(bo, true);
|
||||
if (r) {
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
goto release_object;
|
||||
}
|
||||
|
||||
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
|
||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
|
||||
amdgpu_bo_unreserve(bo);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
if (r)
|
||||
goto release_object;
|
||||
}
|
||||
|
||||
r = drm_gem_handle_create(filp, gobj, &handle);
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
if (r)
|
||||
goto handle_lockup;
|
||||
|
||||
args->handle = handle;
|
||||
up_read(&adev->exclusive_lock);
|
||||
return 0;
|
||||
|
||||
release_object:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
|
||||
handle_lockup:
|
||||
up_read(&adev->exclusive_lock);
|
||||
r = amdgpu_gem_handle_lockup(adev, r);
|
||||
|
||||
return r;
|
||||
}
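/*
 * A sketch of a userspace caller of the ioctl above (not part of this
 * patch; assumes the DRM_IOCTL_AMDGPU_GEM_USERPTR wrapper and flag
 * definitions from the amdgpu_drm.h uapi header added by this series,
 * plus an already-open render node fd). Writable mappings must pass
 * both ANONONLY and REGISTER to get past the -EACCES check above.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int map_user_pages(int fd, void *addr, uint64_t size, uint32_t *handle)
{
	struct drm_amdgpu_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uint64_t)(uintptr_t)addr;	/* must be page aligned */
	args.size = size;			/* must be page aligned */
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY |
		     AMDGPU_GEM_USERPTR_REGISTER |
		     AMDGPU_GEM_USERPTR_VALIDATE;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args))
		return -1;

	*handle = args.handle;
	return 0;
}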
|
||||
|
||||
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset_p)
|
||||
{
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_bo *robj;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, handle);
|
||||
if (gobj == NULL) {
|
||||
return -ENOENT;
|
||||
}
|
||||
robj = gem_to_amdgpu_bo(gobj);
|
||||
if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return -EPERM;
|
||||
}
|
||||
*offset_p = amdgpu_bo_mmap_offset(robj);
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
union drm_amdgpu_gem_mmap *args = data;
|
||||
uint32_t handle = args->in.handle;
|
||||
memset(args, 0, sizeof(*args));
|
||||
return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gem_timeout - calculate jiffies timeout from absolute value
|
||||
*
|
||||
* @timeout_ns: timeout in ns
|
||||
*
|
||||
* Calculate the timeout in jiffies from an absolute timeout in ns.
|
||||
*/
|
||||
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
|
||||
{
|
||||
unsigned long timeout_jiffies;
|
||||
ktime_t timeout;
|
||||
|
||||
/* clamp timeout if it's too large */
|
||||
if (((int64_t)timeout_ns) < 0)
|
||||
return MAX_SCHEDULE_TIMEOUT;
|
||||
|
||||
timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
|
||||
if (ktime_to_ns(timeout) < 0)
|
||||
return 0;
|
||||
|
||||
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
|
||||
/* clamp timeout to avoid an unsigned -> signed overflow */
|
||||
if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
|
||||
return MAX_SCHEDULE_TIMEOUT - 1;
|
||||
|
||||
return timeout_jiffies;
|
||||
}
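/*
 * A quick illustration (not from the original patch): with HZ = 250 an
 * absolute deadline one second in the future converts to roughly 250
 * jiffies; a timeout_ns with the sign bit set means "wait forever" and
 * returns MAX_SCHEDULE_TIMEOUT, and an already-expired deadline
 * returns 0.
 */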
|
||||
|
||||
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
union drm_amdgpu_gem_wait_idle *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_bo *robj;
|
||||
uint32_t handle = args->in.handle;
|
||||
unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
|
||||
int r = 0;
|
||||
long ret;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, handle);
|
||||
if (gobj == NULL) {
|
||||
return -ENOENT;
|
||||
}
|
||||
robj = gem_to_amdgpu_bo(gobj);
|
||||
if (timeout == 0)
|
||||
ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
|
||||
else
|
||||
ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
|
||||
|
||||
/* ret == 0 means not signaled,
|
||||
* ret > 0 means signaled
|
||||
* ret < 0 means interrupted before timeout
|
||||
*/
|
||||
if (ret >= 0) {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.status = (ret == 0);
|
||||
} else
|
||||
r = ret;
|
||||
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
r = amdgpu_gem_handle_lockup(adev, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct drm_amdgpu_gem_metadata *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_bo *robj;
|
||||
int r = -1;
|
||||
|
||||
DRM_DEBUG("%d \n", args->handle);
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
||||
if (gobj == NULL)
|
||||
return -ENOENT;
|
||||
robj = gem_to_amdgpu_bo(gobj);
|
||||
|
||||
r = amdgpu_bo_reserve(robj, false);
|
||||
if (unlikely(r != 0))
|
||||
goto out;
|
||||
|
||||
if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
|
||||
amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
|
||||
r = amdgpu_bo_get_metadata(robj, args->data.data,
|
||||
sizeof(args->data.data),
|
||||
&args->data.data_size_bytes,
|
||||
&args->data.flags);
|
||||
} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
|
||||
r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
|
||||
if (!r)
|
||||
r = amdgpu_bo_set_metadata(robj, args->data.data,
|
||||
args->data.data_size_bytes,
|
||||
args->data.flags);
|
||||
}
|
||||
|
||||
amdgpu_bo_unreserve(robj);
|
||||
out:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gem_va_update_vm - update the bo_va in its VM
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @bo_va: bo_va to update
|
||||
*
|
||||
* Update the bo_va directly after setting its address. Errors are not
|
||||
* vital here, so they are not reported back to userspace.
|
||||
*/
|
||||
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va)
|
||||
{
|
||||
struct ttm_validate_buffer tv, *entry;
|
||||
struct amdgpu_bo_list_entry *vm_bos;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct list_head list;
|
||||
unsigned domain;
|
||||
int r;
|
||||
|
||||
INIT_LIST_HEAD(&list);
|
||||
|
||||
tv.bo = &bo_va->bo->tbo;
|
||||
tv.shared = true;
|
||||
list_add(&tv.head, &list);
|
||||
|
||||
vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
|
||||
if (!vm_bos)
|
||||
return;
|
||||
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
list_for_each_entry(entry, &list, head) {
|
||||
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
|
||||
/* if anything is swapped out don't swap it in here,
|
||||
just abort and wait for the next CS */
|
||||
if (domain == AMDGPU_GEM_DOMAIN_CPU)
|
||||
goto error_unreserve;
|
||||
}
|
||||
|
||||
mutex_lock(&bo_va->vm->mutex);
|
||||
r = amdgpu_vm_clear_freed(adev, bo_va->vm);
|
||||
if (r)
|
||||
goto error_unlock;
|
||||
|
||||
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
|
||||
|
||||
error_unlock:
|
||||
mutex_unlock(&bo_va->vm->mutex);
|
||||
|
||||
error_unreserve:
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
|
||||
error_free:
|
||||
drm_free_large(vm_bos);
|
||||
|
||||
if (r)
|
||||
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
|
||||
}
|
||||
|
||||
|
||||
|
||||
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
union drm_amdgpu_gem_va *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
struct amdgpu_bo *rbo;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
uint32_t invalid_flags, va_flags = 0;
|
||||
int r = 0;
|
||||
|
||||
if (!adev->vm_manager.enabled) {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
||||
if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"va_address 0x%lX is in reserved area 0x%X\n",
|
||||
(unsigned long)args->in.va_address,
|
||||
AMDGPU_VA_RESERVED_SIZE);
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
|
||||
AMDGPU_VM_PAGE_EXECUTABLE);
|
||||
if ((args->in.flags & invalid_flags)) {
|
||||
dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
|
||||
args->in.flags, invalid_flags);
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (args->in.operation) {
|
||||
case AMDGPU_VA_OP_MAP:
|
||||
case AMDGPU_VA_OP_UNMAP:
|
||||
break;
|
||||
default:
|
||||
dev_err(&dev->pdev->dev, "unsupported operation %d\n",
|
||||
args->in.operation);
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
|
||||
if (gobj == NULL) {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
return -ENOENT;
|
||||
}
|
||||
rbo = gem_to_amdgpu_bo(gobj);
|
||||
r = amdgpu_bo_reserve(rbo, false);
|
||||
if (r) {
|
||||
if (r != -ERESTARTSYS) {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
}
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
|
||||
if (!bo_va) {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
switch (args->in.operation) {
|
||||
case AMDGPU_VA_OP_MAP:
|
||||
if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
|
||||
va_flags |= AMDGPU_PTE_READABLE;
|
||||
if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
|
||||
va_flags |= AMDGPU_PTE_WRITEABLE;
|
||||
if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
|
||||
va_flags |= AMDGPU_PTE_EXECUTABLE;
|
||||
r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address, 0,
|
||||
amdgpu_bo_size(bo_va->bo), va_flags);
|
||||
break;
|
||||
case AMDGPU_VA_OP_UNMAP:
|
||||
r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!r) {
|
||||
amdgpu_gem_va_update_vm(adev, bo_va);
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_OK;
|
||||
} else {
|
||||
memset(args, 0, sizeof(*args));
|
||||
args->out.result = AMDGPU_VA_RESULT_ERROR;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return r;
|
||||
}
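
/* Editor's note: a minimal userspace sketch (not part of this patch) of how
 * the VA ioctl above is expected to be driven through libdrm's
 * drmCommandWriteRead(). The fd and bo_handle are assumed to come from an
 * earlier drm open and GEM create; illustrative only, hence the #if 0.
 */
#if 0
static int example_map_bo(int fd, uint32_t bo_handle, uint64_t gpu_va)
{
	union drm_amdgpu_gem_va va;

	memset(&va, 0, sizeof(va));
	va.in.handle = bo_handle;
	va.in.operation = AMDGPU_VA_OP_MAP;
	va.in.va_address = gpu_va;	/* must be >= AMDGPU_VA_RESERVED_SIZE */
	va.in.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va)))
		return -1;
	return (va.out.result == AMDGPU_VA_RESULT_OK) ? 0 : -1;
}
#endif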

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_INITIAL_DOMAIN:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     0, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
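
/* Editor's note: a worked example of the size math above, assuming
 * amdgpu_align_pitch() leaves an already-aligned width unchanged:
 * 1920x1080 at 32 bpp gives cpp = (32 + 1) / 8 = 4 bytes per pixel, so
 * pitch = 1920 * 4 = 7680 bytes and size = 7680 * 1080 = 8294400 bytes,
 * which ALIGN() then rounds up to the next multiple of PAGE_SIZE.
 * The (bpp + 1) / 8 form also rounds 15 bpp up to 2 bytes per pixel.
 */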

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}

@ -0,0 +1,72 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"

/*
 * GPU scratch registers helper functions.
 */
/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		if (adev->gfx.scratch.free[i]) {
			adev->gfx.scratch.free[i] = false;
			*reg = adev->gfx.scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	int i;

	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		if (adev->gfx.scratch.reg[i] == reg) {
			adev->gfx.scratch.free[i] = true;
			return;
		}
	}
}
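
/* Editor's note: a hedged sketch of the intended use of the allocator
 * above, modelled on a typical ring test; the packet submission in the
 * middle and the magic values are stand-ins for asic-specific code.
 */
#if 0
	uint32_t scratch;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (!r) {
		WREG32(scratch, 0xCAFEDEAD);	/* seed a known value */
		/* ... emit a packet that writes scratch, then poll ... */
		if (RREG32(scratch) == 0xDEADBEEF)
			DRM_INFO("ring test succeeded\n");
		amdgpu_gfx_scratch_free(adev, scratch);
	}
#endif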

@ -0,0 +1,30 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_GFX_H__
#define __AMDGPU_GFX_H__

int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);

#endif

@ -0,0 +1,395 @@

/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <linux/export.h>

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "atombios_dp.h"
#include "atombios_i2c.h"

/* bit banging i2c */
static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
{
	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t temp;

	mutex_lock(&i2c->mutex);

	/* switch the pads to ddc mode */
	if (rec->hw_capable) {
		temp = RREG32(rec->mask_clk_reg);
		temp &= ~(1 << 16);
		WREG32(rec->mask_clk_reg, temp);
	}

	/* clear the output pin values */
	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
	WREG32(rec->a_clk_reg, temp);

	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
	WREG32(rec->a_data_reg, temp);

	/* set the pins to input */
	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
	WREG32(rec->en_clk_reg, temp);

	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
	WREG32(rec->en_data_reg, temp);

	/* mask the gpio pins for software use */
	temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
	WREG32(rec->mask_clk_reg, temp);
	temp = RREG32(rec->mask_clk_reg);

	temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
	WREG32(rec->mask_data_reg, temp);
	temp = RREG32(rec->mask_data_reg);

	return 0;
}

static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
{
	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t temp;

	/* unmask the gpio pins for software use */
	temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
	WREG32(rec->mask_clk_reg, temp);
	temp = RREG32(rec->mask_clk_reg);

	temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
	WREG32(rec->mask_data_reg, temp);
	temp = RREG32(rec->mask_data_reg);

	mutex_unlock(&i2c->mutex);
}

static int amdgpu_i2c_get_clock(void *i2c_priv)
{
	struct amdgpu_i2c_chan *i2c = i2c_priv;
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	/* read the value off the pin */
	val = RREG32(rec->y_clk_reg);
	val &= rec->y_clk_mask;

	return (val != 0);
}

static int amdgpu_i2c_get_data(void *i2c_priv)
{
	struct amdgpu_i2c_chan *i2c = i2c_priv;
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	/* read the value off the pin */
	val = RREG32(rec->y_data_reg);
	val &= rec->y_data_mask;

	return (val != 0);
}

static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
{
	struct amdgpu_i2c_chan *i2c = i2c_priv;
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	/* set pin direction */
	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
	val |= clock ? 0 : rec->en_clk_mask;
	WREG32(rec->en_clk_reg, val);
}

static void amdgpu_i2c_set_data(void *i2c_priv, int data)
{
	struct amdgpu_i2c_chan *i2c = i2c_priv;
	struct amdgpu_device *adev = i2c->dev->dev_private;
	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	/* set pin direction */
	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
	val |= data ? 0 : rec->en_data_mask;
	WREG32(rec->en_data_reg, val);
}
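
/* Editor's note: set_clock()/set_data() above only ever touch the "en"
 * (output-enable) masks because the bus is emulated as open drain: the
 * output latches were pre-loaded with 0 in amdgpu_i2c_pre_xfer(), so
 * enabling a pin as an output drives the line low, while disabling it
 * floats the line and lets the external pull-up take it high.
 */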

static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
	.master_xfer = amdgpu_atombios_i2c_xfer,
	.functionality = amdgpu_atombios_i2c_func,
};

struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
					  struct amdgpu_i2c_bus_rec *rec,
					  const char *name)
{
	struct amdgpu_i2c_chan *i2c;
	int ret;

	/* don't add the mm_i2c bus unless hw_i2c is enabled */
	if (rec->mm_i2c && (amdgpu_hw_i2c == 0))
		return NULL;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL);
	if (i2c == NULL)
		return NULL;

	i2c->rec = *rec;
	i2c->adapter.owner = THIS_MODULE;
	i2c->adapter.class = I2C_CLASS_DDC;
	i2c->adapter.dev.parent = &dev->pdev->dev;
	i2c->dev = dev;
	i2c_set_adapdata(&i2c->adapter, i2c);
	mutex_init(&i2c->mutex);
	if (rec->hw_capable &&
	    amdgpu_hw_i2c) {
		/* hw i2c using atom */
		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
			 "AMDGPU i2c hw bus %s", name);
		i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
		ret = i2c_add_adapter(&i2c->adapter);
		if (ret) {
			DRM_ERROR("Failed to register hw i2c %s\n", name);
			goto out_free;
		}
	} else {
		/* set the amdgpu bit adapter */
		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
			 "AMDGPU i2c bit bus %s", name);
		i2c->adapter.algo_data = &i2c->bit;
		i2c->bit.pre_xfer = amdgpu_i2c_pre_xfer;
		i2c->bit.post_xfer = amdgpu_i2c_post_xfer;
		i2c->bit.setsda = amdgpu_i2c_set_data;
		i2c->bit.setscl = amdgpu_i2c_set_clock;
		i2c->bit.getsda = amdgpu_i2c_get_data;
		i2c->bit.getscl = amdgpu_i2c_get_clock;
		i2c->bit.udelay = 10;
		i2c->bit.timeout = usecs_to_jiffies(2200);	/* from VESA */
		i2c->bit.data = i2c;
		ret = i2c_bit_add_bus(&i2c->adapter);
		if (ret) {
			DRM_ERROR("Failed to register bit i2c %s\n", name);
			goto out_free;
		}
	}

	return i2c;
out_free:
	kfree(i2c);
	return NULL;
}

void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
{
	if (!i2c)
		return;
	i2c_del_adapter(&i2c->adapter);
	kfree(i2c);
}

/* Add the default buses */
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
	if (amdgpu_hw_i2c)
		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");

	if (adev->is_atom_bios)
		amdgpu_atombios_i2c_init(adev);
}

/* remove all the buses */
void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
		if (adev->i2c_bus[i]) {
			amdgpu_i2c_destroy(adev->i2c_bus[i]);
			adev->i2c_bus[i] = NULL;
		}
	}
}

/* Add additional buses */
void amdgpu_i2c_add(struct amdgpu_device *adev,
		    struct amdgpu_i2c_bus_rec *rec,
		    const char *name)
{
	struct drm_device *dev = adev->ddev;
	int i;

	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
		if (!adev->i2c_bus[i]) {
			adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
			return;
		}
	}
}

/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
		  struct amdgpu_i2c_bus_rec *i2c_bus)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
		if (adev->i2c_bus[i] &&
		    (adev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
			return adev->i2c_bus[i];
		}
	}
	return NULL;
}

static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
				u8 slave_addr,
				u8 addr,
				u8 *val)
{
	u8 out_buf[2];
	u8 in_buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr = slave_addr,
			.flags = 0,
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = in_buf,
		}
	};

	out_buf[0] = addr;
	out_buf[1] = 0;

	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
		*val = in_buf[0];
		DRM_DEBUG("val = 0x%02x\n", *val);
	} else {
		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
			  addr, *val);
	}
}

static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
				u8 slave_addr,
				u8 addr,
				u8 val)
{
	uint8_t out_buf[2];
	struct i2c_msg msg = {
		.addr = slave_addr,
		.flags = 0,
		.len = 2,
		.buf = out_buf,
	};

	out_buf[0] = addr;
	out_buf[1] = val;

	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
			  addr, val);
}

/* ddc router switching */
void
amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
{
	u8 val;

	if (!amdgpu_connector->router.ddc_valid)
		return;

	if (!amdgpu_connector->router_bus)
		return;

	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x3, &val);
	val &= ~amdgpu_connector->router.ddc_mux_control_pin;
	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x3, val);
	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x1, &val);
	val &= ~amdgpu_connector->router.ddc_mux_control_pin;
	val |= amdgpu_connector->router.ddc_mux_state;
	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x1, val);
}

/* clock/data router switching */
void
amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
{
	u8 val;

	if (!amdgpu_connector->router.cd_valid)
		return;

	if (!amdgpu_connector->router_bus)
		return;

	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x3, &val);
	val &= ~amdgpu_connector->router.cd_mux_control_pin;
	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x3, val);
	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x1, &val);
	val &= ~amdgpu_connector->router.cd_mux_control_pin;
	val |= amdgpu_connector->router.cd_mux_state;
	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
			    amdgpu_connector->router.i2c_addr,
			    0x1, val);
}

@ -0,0 +1,44 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_I2C_H__
#define __AMDGPU_I2C_H__

struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
					  struct amdgpu_i2c_bus_rec *rec,
					  const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
void amdgpu_i2c_add(struct amdgpu_device *adev,
		    struct amdgpu_i2c_bus_rec *rec,
		    const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
		  struct amdgpu_i2c_bus_rec *i2c_bus);
void
amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
void
amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);

#endif

@ -0,0 +1,345 @@

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @ring: ring index the IB is associated with
 * @vm: optional VM this IB is associated with
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
		else
			ib->gpu_addr = 0;

	} else {
		ib->sa_bo = NULL;
		ib->ptr = NULL;
		ib->gpu_addr = 0;
	}

	amdgpu_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->user = NULL;
	ib->vm = vm;
	ib->is_const_ib = false;
	ib->gds_base = 0;
	ib->gds_size = 0;
	ib->gws_base = 0;
	ib->gws_size = 0;
	ib->oa_base = 0;
	ib->oa_size = 0;

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
{
	amdgpu_sync_free(adev, &ib->sync, ib->fence);
	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
	amdgpu_fence_unref(&ib->fence);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @adev: amdgpu_device pointer
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @owner: owner for creating the fences
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ibs, void *owner)
{
	struct amdgpu_ring *ring;
	struct amdgpu_vm *vm = ibs->vm;
	struct amdgpu_ib *ib = &ibs[0];
	unsigned i;
	int r = 0;
	bool flush_hdp = true;

	if (num_ibs == 0)
		return -EINVAL;

	ring = ibs->ring;
	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (vm) {
		/* grab a vm id if necessary */
		struct amdgpu_fence *vm_id_fence = NULL;
		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
	}

	r = amdgpu_sync_rings(&ibs->sync, ring);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
		return r;
	}

	if (vm) {
		/* do context switch */
		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
	}

	if (ring->funcs->emit_gds_switch && ib->vm && ib->gds_needed)
		amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
					    ib->gds_base, ib->gds_size,
					    ib->gws_base, ib->gws_size,
					    ib->oa_base, ib->oa_size);

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (ib->ring != ring) {
			amdgpu_ring_unlock_undo(ring);
			return -EINVAL;
		}
		ib->flush_hdp_writefifo = flush_hdp;
		flush_hdp = false;
		amdgpu_ring_emit_ib(ring, ib);
	}

	r = amdgpu_fence_emit(ring, owner, &ib->fence);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		amdgpu_ring_unlock_undo(ring);
		return r;
	}

	/* wrap the last IB with fence */
	if (ib->user) {
		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
		addr += ib->user->offset;
		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true);
	}

	if (ib->vm)
		amdgpu_vm_fence(adev, ib->vm, ib->fence);

	amdgpu_ring_unlock_commit(ring);
	return 0;
}
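
/* Editor's note: a sketch (not part of this patch) of the IB life cycle
 * that amdgpu_ib_get()/amdgpu_ib_schedule()/amdgpu_ib_free() add up to.
 * The packet contents are placeholders, and length_dw is assumed to be
 * the IB size field from amdgpu.h; error handling is trimmed.
 */
#if 0
	struct amdgpu_ib ib;
	int r;

	r = amdgpu_ib_get(ring, NULL, 256, &ib);	/* 256 bytes, no VM */
	if (!r) {
		ib.ptr[0] = 0;		/* CPU-write packets into the IB */
		ib.length_dw = 1;	/* assumed size-in-dwords field */
		r = amdgpu_ib_schedule(adev, 1, &ib, owner);
		/* ib.fence can now be waited on; free drops it safely */
		amdgpu_ib_free(adev, &ib);
	}
#endif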

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring);
		if (r) {
			ring->ready = false;
			adev->needs_reset = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}

@ -0,0 +1,216 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"

/**
 * amdgpu_ih_ring_alloc - allocate memory for the IH ring
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a ring buffer for the interrupt controller.
 * Returns 0 for success, errors for failure.
 */
static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
{
	int r;

	/* Allocate ring buffer */
	if (adev->irq.ih.ring_obj == NULL) {
		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, &adev->irq.ih.ring_obj);
		if (r) {
			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
				  AMDGPU_GEM_DOMAIN_GTT,
				  &adev->irq.ih.gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
			DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
				   (void **)&adev->irq.ih.ring);
		amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
		if (r) {
			DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_ih_ring_init - initialize the IH state
 *
 * @adev: amdgpu_device pointer
 * @ring_size: requested ring size in bytes (rounded to a power of two)
 * @use_bus_addr: true to place the ring in bus-mapped system memory
 *
 * Initializes the IH state and allocates a buffer
 * for the IH ring buffer.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
			bool use_bus_addr)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	adev->irq.ih.ring_size = ring_size;
	adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
	adev->irq.ih.rptr = 0;
	adev->irq.ih.use_bus_addr = use_bus_addr;

	if (adev->irq.ih.use_bus_addr) {
		if (!adev->irq.ih.ring) {
			/* add 8 bytes for the rptr/wptr shadows and
			 * add them to the end of the ring allocation.
			 */
			adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
			if (adev->irq.ih.ring == NULL)
				return -ENOMEM;
			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
								  (void *)adev->irq.ih.ring,
								  adev->irq.ih.ring_size,
								  PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
				dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
				kfree((void *)adev->irq.ih.ring);
				return -ENOMEM;
			}
			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
		}
		return 0;
	} else {
		r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
		if (r) {
			amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
			dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
			return r;
		}

		return amdgpu_ih_ring_alloc(adev);
	}
}
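
/* Editor's note: a worked example of the alignment above: a requested
 * ring_size of 96000 bytes is 24000 four-byte ring entries;
 * order_base_2(24000) = 15, so ring_size becomes (1 << 15) * 4 = 131072
 * bytes and ptr_mask = 131071, letting the read/write pointers wrap with
 * a simple AND instead of a modulo.
 */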

/**
 * amdgpu_ih_ring_fini - tear down the IH state
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the IH state and frees buffer
 * used for the IH ring buffer.
 */
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->irq.ih.use_bus_addr) {
		if (adev->irq.ih.ring) {
			/* add 8 bytes for the rptr/wptr shadows and
			 * add them to the end of the ring allocation.
			 */
			pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
					 adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
			kfree((void *)adev->irq.ih.ring);
			adev->irq.ih.ring = NULL;
		}
	} else {
		if (adev->irq.ih.ring_obj) {
			r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
			if (likely(r == 0)) {
				amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
				amdgpu_bo_unpin(adev->irq.ih.ring_obj);
				amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
			}
			amdgpu_bo_unref(&adev->irq.ih.ring_obj);
			adev->irq.ih.ring = NULL;
			adev->irq.ih.ring_obj = NULL;
		}
		amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
		amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
	}
}

/**
 * amdgpu_ih_process - interrupt handler
 *
 * @adev: amdgpu_device pointer
 *
 * Interrupt handler (VI), walk the IH ring.
 * Returns irq process return code.
 */
int amdgpu_ih_process(struct amdgpu_device *adev)
{
	struct amdgpu_iv_entry entry;
	u32 wptr;

	if (!adev->irq.ih.enabled || adev->shutdown)
		return IRQ_NONE;

	wptr = amdgpu_ih_get_wptr(adev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&adev->irq.ih.lock, 1))
		return IRQ_NONE;

	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	while (adev->irq.ih.rptr != wptr) {
		amdgpu_ih_decode_iv(adev, &entry);
		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;

		amdgpu_irq_dispatch(adev, &entry);
	}
	amdgpu_ih_set_rptr(adev);
	atomic_set(&adev->irq.ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = amdgpu_ih_get_wptr(adev);
	if (wptr != adev->irq.ih.rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
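
/* Editor's note: the atomic_xchg() above is what makes re-entry safe; the
 * first caller takes the lock and walks rptr up to the wptr snapshot,
 * while any concurrent interrupt simply bails out with IRQ_NONE. The
 * final wptr re-read closes the race where new entries land after the
 * walk but before the lock is dropped: processing restarts via
 * restart_ih instead of losing them.
 */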
|
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Copyright 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __AMDGPU_IH_H__
|
||||
#define __AMDGPU_IH_H__
|
||||
|
||||
struct amdgpu_device;
|
||||
|
||||
/*
|
||||
* R6xx+ IH ring
|
||||
*/
|
||||
struct amdgpu_ih_ring {
|
||||
struct amdgpu_bo *ring_obj;
|
||||
volatile uint32_t *ring;
|
||||
unsigned rptr;
|
||||
unsigned ring_size;
|
||||
uint64_t gpu_addr;
|
||||
uint32_t ptr_mask;
|
||||
atomic_t lock;
|
||||
bool enabled;
|
||||
unsigned wptr_offs;
|
||||
unsigned rptr_offs;
|
||||
u32 doorbell_index;
|
||||
bool use_doorbell;
|
||||
bool use_bus_addr;
|
||||
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
|
||||
};
|
||||
|
||||
struct amdgpu_iv_entry {
|
||||
unsigned src_id;
|
||||
unsigned src_data;
|
||||
unsigned ring_id;
|
||||
unsigned vm_id;
|
||||
unsigned pas_id;
|
||||
};
|
||||
|
||||
int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
|
||||
bool use_bus_addr);
|
||||
void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_ih_process(struct amdgpu_device *adev);
|
||||
|
||||
#endif
|
|
@ -0,0 +1,47 @@
|
|||
/**
|
||||
* \file amdgpu_ioc32.c
|
||||
*
|
||||
* 32-bit ioctl compatibility routines for the AMDGPU DRM.
|
||||
*
|
||||
* \author Paul Mackerras <paulus@samba.org>
|
||||
*
|
||||
* Copyright (C) Paul Mackerras 2005
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
#include <linux/compat.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu_drv.h"
|
||||
|
||||
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
unsigned int nr = DRM_IOCTL_NR(cmd);
|
||||
int ret;
|
||||
|
||||
if (nr < DRM_COMMAND_BASE)
|
||||
return drm_compat_ioctl(filp, cmd, arg);
|
||||
|
||||
ret = amdgpu_drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
|
@ -0,0 +1,456 @@
|
|||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_ih.h"
|
||||
#include "atom.h"
|
||||
#include "amdgpu_connectors.h"
|
||||
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
|
||||
|
||||
/*
|
||||
* Handle hotplug events outside the interrupt handler proper.
|
||||
*/
|
||||
/**
|
||||
* amdgpu_hotplug_work_func - display hotplug work handler
|
||||
*
|
||||
* @work: work struct
|
||||
*
|
||||
* This is the hot plug event work handler (all asics).
|
||||
* The work gets scheduled from the irq handler if there
|
||||
* was a hot plug interrupt. It walks the connector table
|
||||
* and calls the hotplug handler for each one, then sends
|
||||
* a drm hotplug event to alert userspace.
|
||||
*/
|
||||
static void amdgpu_hotplug_work_func(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
|
||||
hotplug_work);
|
||||
struct drm_device *dev = adev->ddev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_connector *connector;
|
||||
|
||||
if (mode_config->num_connector) {
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head)
|
||||
amdgpu_connector_hotplug(connector);
|
||||
}
|
||||
/* Just fire off a uevent and let userspace tell us what to do */
|
||||
drm_helper_hpd_irq_event(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_reset_work_func - execute gpu reset
|
||||
*
|
||||
* @work: work struct
|
||||
*
|
||||
* Execute scheduled gpu reset (cayman+).
|
||||
* This function is called when the irq handler
|
||||
* thinks we need a gpu reset.
|
||||
*/
|
||||
static void amdgpu_irq_reset_work_func(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
|
||||
reset_work);
|
||||
|
||||
amdgpu_gpu_reset(adev);
|
||||
}
|
||||
|
||||
/* Disable *all* interrupts */
|
||||
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
unsigned i, j;
|
||||
int r;
|
||||
|
||||
spin_lock_irqsave(&adev->irq.lock, irqflags);
|
||||
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
|
||||
struct amdgpu_irq_src *src = adev->irq.sources[i];
|
||||
|
||||
if (!src || !src->funcs->set || !src->num_types)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < src->num_types; ++j) {
|
||||
atomic_set(&src->enabled_types[j], 0);
|
||||
r = src->funcs->set(adev, src, j,
|
||||
AMDGPU_IRQ_STATE_DISABLE);
|
||||
if (r)
|
||||
DRM_ERROR("error disabling interrupt (%d)\n",
|
||||
r);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&adev->irq.lock, irqflags);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_preinstall - drm irq preinstall callback
|
||||
*
|
||||
* @dev: drm dev pointer
|
||||
*
|
||||
* Gets the hw ready to enable irqs (all asics).
|
||||
* This function disables all interrupt sources on the GPU.
|
||||
*/
|
||||
void amdgpu_irq_preinstall(struct drm_device *dev)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
|
||||
/* Disable *all* interrupts */
|
||||
amdgpu_irq_disable_all(adev);
|
||||
/* Clear bits */
|
||||
amdgpu_ih_process(adev);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_postinstall - drm irq preinstall callback
|
||||
*
|
||||
* @dev: drm dev pointer
|
||||
*
|
||||
* Handles stuff to be done after enabling irqs (all asics).
|
||||
* Returns 0 on success.
|
||||
*/
|
||||
int amdgpu_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
dev->max_vblank_count = 0x001fffff;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_uninstall - drm irq uninstall callback
|
||||
*
|
||||
* @dev: drm dev pointer
|
||||
*
|
||||
* This function disables all interrupt sources on the GPU (all asics).
|
||||
*/
|
||||
void amdgpu_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
|
||||
if (adev == NULL) {
|
||||
return;
|
||||
}
|
||||
amdgpu_irq_disable_all(adev);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_handler - irq handler
|
||||
*
|
||||
* @int irq, void *arg: args
|
||||
*
|
||||
* This is the irq handler for the amdgpu driver (all asics).
|
||||
*/
|
||||
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
irqreturn_t ret;
|
||||
|
||||
ret = amdgpu_ih_process(adev);
|
||||
if (ret == IRQ_HANDLED)
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_msi_ok - asic specific msi checks
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
*
|
||||
* Handles asic specific MSI checks to determine if
|
||||
* MSIs should be enabled on a particular chip (all asics).
|
||||
* Returns true if MSIs should be enabled, false if MSIs
|
||||
* should not be enabled.
|
||||
*/
|
||||
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
|
||||
{
|
||||
/* force MSI on */
|
||||
if (amdgpu_msi == 1)
|
||||
return true;
|
||||
else if (amdgpu_msi == 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_irq_init - init driver interrupt info
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
*
|
||||
* Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
|
||||
* Returns 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
spin_lock_init(&adev->irq.lock);
|
||||
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
/* enable msi */
|
||||
adev->irq.msi_enabled = false;
|
||||
|
||||
if (amdgpu_msi_ok(adev)) {
|
||||
int ret = pci_enable_msi(adev->pdev);
|
||||
if (!ret) {
|
||||
adev->irq.msi_enabled = true;
|
||||
dev_info(adev->dev, "amdgpu: using MSI.\n");
|
||||
}
|
||||
}
|
||||
|
||||
INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
|
||||
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
|
||||
|
||||
adev->irq.installed = true;
|
||||
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
|
||||
if (r) {
|
||||
adev->irq.installed = false;
|
||||
flush_work(&adev->hotplug_work);
|
||||
return r;
|
||||
}
|
||||
|
||||
DRM_INFO("amdgpu: irq initialized.\n");
|
||||
return 0;
|
||||
}

/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		kfree(src->enabled_types);
		src->enabled_types = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @src_id: source id for this source
 * @source: irq source
 *
 * Registers the irq source under its source id so that incoming
 * interrupt vectors with that id get dispatched to it.
 * Returns 0 on success, -EINVAL if @src_id is invalid or already
 * taken, or -ENOMEM if the per-type usage counts cannot be allocated.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (adev->irq.sources[src_id] != NULL)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.sources[src_id] = source;
	return 0;
}
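
/*
 * Example (editor's sketch, all names hypothetical): an interrupt
 * generating IP block fills in an amdgpu_irq_src and registers it
 * under its source id, typically from its sw_init callback:
 *
 *	static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
 *		.set = my_block_irq_set,
 *		.process = my_block_irq_process,
 *	};
 *
 *	adev->my_block.irq.num_types = MY_BLOCK_IRQ_LAST;
 *	adev->my_block.irq.funcs = &my_block_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, MY_BLOCK_IRQ_SRC_ID, &adev->my_block.irq);
 *	if (r)
 *		return r;
 */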

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector entry
 *
 * Dispatches the irq to the different IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	src = adev->irq.sources[src_id];
	if (!src) {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
		return;
	}

	r = src->funcs->process(adev, src, entry);
	if (r)
		DRM_ERROR("error processing interrupt (%d)\n", r);
}
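
/*
 * Example (editor's sketch, hypothetical names): the matching
 * ->process callback decodes the vector entry and fans the event out
 * inside the driver:
 *
 *	static int my_block_irq_process(struct amdgpu_device *adev,
 *					struct amdgpu_irq_src *source,
 *					struct amdgpu_iv_entry *entry)
 *	{
 *		switch (entry->src_data) {
 *		case MY_BLOCK_EVENT_DONE:
 *			wake_up_all(&adev->my_block.wait_queue);
 *			break;
 *		default:
 *			DRM_DEBUG("unhandled src_data: %d\n", entry->src_data);
 *			break;
 *		}
 *		return 0;
 *	}
 */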

/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the current state after taking the lock,
	 * otherwise we might disable just-enabled interrupts again. */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Enables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_get_delayed - grab a reference without touching the hw
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Like amdgpu_irq_get(), but only bumps the usage count for the given
 * interrupt type. Returns true if this was the first reference, in
 * which case the actual hw state update is left to the caller.
 */
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type)
{
	if ((type >= src->num_types) || !src->enabled_types)
		return false;
	return atomic_inc_return(&src->enabled_types[type]) == 1;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to disable
 * @type: type of interrupt you want to disable
 *
 * Disables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
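
/*
 * Usage note (editor's sketch): enables are reference counted, so every
 * amdgpu_irq_get() is paired with an amdgpu_irq_put(); only the 0 <-> 1
 * transitions reach the hardware via amdgpu_irq_update():
 *
 *	r = amdgpu_irq_get(adev, &adev->crtc_irq, type);  // 0 -> 1 enables hw
 *	...
 *	amdgpu_irq_put(adev, &adev->crtc_irq, type);      // 1 -> 0 disables hw
 */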

/**
 * amdgpu_irq_enabled - test if irq is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to test
 * @type: type of interrupt you want to test
 *
 * Tests if the given interrupt source and type are enabled or not.
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

@ -0,0 +1,92 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_IRQ_H__
#define __AMDGPU_IRQ_H__

#include "amdgpu_ih.h"

#define AMDGPU_MAX_IRQ_SRC_ID	0x100

struct amdgpu_device;
struct amdgpu_iv_entry;

enum amdgpu_interrupt_state {
	AMDGPU_IRQ_STATE_DISABLE,
	AMDGPU_IRQ_STATE_ENABLE,
};

struct amdgpu_irq_src {
	unsigned num_types;
	atomic_t *enabled_types;
	const struct amdgpu_irq_src_funcs *funcs;
};

/* provided by interrupt generating IP blocks */
struct amdgpu_irq_src_funcs {
	int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
		   unsigned type, enum amdgpu_interrupt_state state);

	int (*process)(struct amdgpu_device *adev,
		       struct amdgpu_irq_src *source,
		       struct amdgpu_iv_entry *entry);
};

struct amdgpu_irq {
	bool installed;
	spinlock_t lock;
	/* interrupt sources */
	struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID];

	/* status, etc. */
	bool msi_enabled; /* msi enabled */

	/* interrupt ring */
	struct amdgpu_ih_ring ih;
	const struct amdgpu_ih_funcs *ih_funcs;
};

void amdgpu_irq_preinstall(struct drm_device *dev);
int amdgpu_irq_postinstall(struct drm_device *dev);
void amdgpu_irq_uninstall(struct drm_device *dev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg);

int amdgpu_irq_init(struct amdgpu_device *adev);
void amdgpu_irq_fini(struct amdgpu_device *adev);
int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
		      struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry);
int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		      unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type);
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type);
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type);

#endif

@ -0,0 +1,674 @@

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
#else
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
int amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return 0;

	if (adev->rmmio == NULL)
		goto done_free;

	pm_runtime_get_sync(dev->dev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    ((flags & AMDGPU_IS_APU) == 0))
		flags |= AMDGPU_IS_PX;

	/* amdgpu_device_init should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failures). It must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: these require modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(long)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amdgpu_ip_block_type type;
		uint32_t ring_mask = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMDGPU_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMDGPU_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMDGPU_IP_BLOCK_TYPE_SDMA;
			ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
			ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMDGPU_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMDGPU_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_enabled[i]) {
				ip.hw_ip_version_major = adev->ip_blocks[i].major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amdgpu_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMDGPU_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMDGPU_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMDGPU_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMDGPU_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMDGPU_IP_BLOCK_TYPE_VCE;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_enabled[i] &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		switch (info->query_fw.fw_type) {
		case AMDGPU_INFO_FW_VCE:
			fw_info.ver = adev->vce.fw_version;
			fw_info.feature = adev->vce.fb_version;
			break;
		case AMDGPU_INFO_FW_UVD:
			fw_info.ver = 0;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GMC:
			fw_info.ver = adev->mc.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_ME:
			fw_info.ver = adev->gfx.me_fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_PFP:
			fw_info.ver = adev->gfx.pfp_fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_CE:
			fw_info.ver = adev->gfx.ce_fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_RLC:
			fw_info.ver = adev->gfx.rlc_fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_MEC:
			if (info->query_fw.index == 0)
				fw_info.ver = adev->gfx.mec_fw_version;
			else if (info->query_fw.index == 1)
				fw_info.ver = adev->gfx.mec2_fw_version;
			else
				return -EINVAL;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_SMC:
			fw_info.ver = adev->pm.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_SDMA:
			if (info->query_fw.index >= 2)
				return -EINVAL;
			fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
			fw_info.feature = 0;
			break;
		default:
			return -EINVAL;
		}
		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_vis_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = atomic64_read(&adev->gtt_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
		vram_gtt.gtt_size = adev->mc.gtt_size;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc(alloc_size, GFP_KERNEL);
		if (!regs)
			return -ENOMEM;

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info;
		struct amdgpu_cu_info cu_info;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled)
			dev_info.max_engine_clock =
				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMDGPU_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
					     AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;

		amdgpu_asic_get_cu_info(adev, &cu_info);
		dev_info.cu_active_number = cu_info.number;
		dev_info.cu_ao_mask = cu_info.ao_cu_mask;
		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
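
/*
 * Example (editor's sketch): userspace reaches this handler through
 * libdrm; assuming the usual libdrm wrappers, querying whether
 * acceleration is working looks roughly like:
 *
 *	uint32_t accel = 0;
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&accel;
 *	request.return_size = sizeof(accel);
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */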

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, create and init a new vm for this file (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r) {
		kfree(fpriv);
		goto out_suspend;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	/* init context manager */
	mutex_init(&fpriv->ctx_mgr.hlock);
	idr_init(&fpriv->ctx_mgr.ctx_handles);
	fpriv->ctx_mgr.adev = adev;

	file_priv->driver_priv = fpriv;

out_suspend:
	/* drop the runtime pm reference taken above even on errors */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down the vm and other per-file
 * state (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	/* release context */
	amdgpu_ctx_fini(fpriv);

	kfree(fpriv);
	file_priv->driver_priv = NULL;
}

/**
 * amdgpu_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, free the UVD and VCE handles still owned
 * by this file (all asics).
 */
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_uvd_free_handles(adev, file_priv);
	amdgpu_vce_free_handles(adev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return amdgpu_display_vblank_get_counter(adev, crtc);
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &adev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc, &drmcrtc->hwmode);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
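
/*
 * Editor's note: this table is what the drm core consults for the device
 * specific DRM_IOCTL_AMDGPU_* numbers from amdgpu_drm.h; presumably it is
 * hooked up through the drm_driver's .ioctls/num_ioctls in amdgpu_drv.c.
 */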

@ -0,0 +1,319 @@

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device *adev;
	struct mm_struct *mm;
	struct mmu_notifier mn;

	/* only used on destruction */
	struct work_struct work;

	/* protected by adev->mn_lock */
	struct hlist_node node;

	/* objects protected by lock */
	struct mutex lock;
	struct rb_root objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node it;
	struct list_head bos;
};
/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them back into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		int r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%d) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r)
				DRM_ERROR("(%d) failed to wait for user bo\n", r);

			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%d) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}
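
/*
 * Editor's note: this is the piece that makes userptr BOs safe. When the
 * process unmaps or otherwise changes pages backing a userptr BO, the core
 * mm invokes this callback before the change becomes visible; the BO is
 * waited on and kicked back to the CPU domain, so the GPU stops using the
 * stale pages.
 */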

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm, or returns the already
 * existing one.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	down_write(&mm->mmap_sem);
	mutex_lock(&adev->mn_lock);

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

	/* merge all BOs from already monitored ranges that overlap
	 * [addr, end] into a single combined node */
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}
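
/*
 * Example (editor's sketch): this pairs with the userptr path; when
 * userspace creates a userptr BO and asks for notifier registration,
 * the GEM ioctl handler ends up doing roughly:
 *
 *	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
 *		r = amdgpu_mn_register(bo, args->addr);
 *		if (r)
 *			goto release_object;
 *	}
 */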

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}

@ -0,0 +1,586 @@

/*
 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
 *                VA Linux Systems Inc., Fremont, California.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Original Authors:
 *    Kevin E. Martin, Rickard E. Faith, Alan Hourihane
 *
 * Kernel port Author: Dave Airlie
 */

#ifndef AMDGPU_MODE_H
#define AMDGPU_MODE_H

#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;

#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)

#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
#define AMDGPU_MAX_AFMT_BLOCKS 7

enum amdgpu_rmx_type {
	RMX_OFF,
	RMX_FULL,
	RMX_CENTER,
	RMX_ASPECT
};

enum amdgpu_underscan_type {
	UNDERSCAN_OFF,
	UNDERSCAN_ON,
	UNDERSCAN_AUTO,
};

#define AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS 50
#define AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS 10

enum amdgpu_hpd_id {
	AMDGPU_HPD_1 = 0,
	AMDGPU_HPD_2,
	AMDGPU_HPD_3,
	AMDGPU_HPD_4,
	AMDGPU_HPD_5,
	AMDGPU_HPD_6,
	AMDGPU_HPD_LAST,
	AMDGPU_HPD_NONE = 0xff,
};

enum amdgpu_crtc_irq {
	AMDGPU_CRTC_IRQ_VBLANK1 = 0,
	AMDGPU_CRTC_IRQ_VBLANK2,
	AMDGPU_CRTC_IRQ_VBLANK3,
	AMDGPU_CRTC_IRQ_VBLANK4,
	AMDGPU_CRTC_IRQ_VBLANK5,
	AMDGPU_CRTC_IRQ_VBLANK6,
	AMDGPU_CRTC_IRQ_VLINE1,
	AMDGPU_CRTC_IRQ_VLINE2,
	AMDGPU_CRTC_IRQ_VLINE3,
	AMDGPU_CRTC_IRQ_VLINE4,
	AMDGPU_CRTC_IRQ_VLINE5,
	AMDGPU_CRTC_IRQ_VLINE6,
	AMDGPU_CRTC_IRQ_LAST,
	AMDGPU_CRTC_IRQ_NONE = 0xff
};

enum amdgpu_pageflip_irq {
	AMDGPU_PAGEFLIP_IRQ_D1 = 0,
	AMDGPU_PAGEFLIP_IRQ_D2,
	AMDGPU_PAGEFLIP_IRQ_D3,
	AMDGPU_PAGEFLIP_IRQ_D4,
	AMDGPU_PAGEFLIP_IRQ_D5,
	AMDGPU_PAGEFLIP_IRQ_D6,
	AMDGPU_PAGEFLIP_IRQ_LAST,
	AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
};

enum amdgpu_flip_status {
	AMDGPU_FLIP_NONE,
	AMDGPU_FLIP_PENDING,
	AMDGPU_FLIP_SUBMITTED
};

#define AMDGPU_MAX_I2C_BUS 16

/* amdgpu gpio-based i2c
 * 1. "mask" reg and bits
 *    grabs the gpio pins for software use
 *    0=not held  1=held
 * 2. "a" reg and bits
 *    output pin value
 *    0=low 1=high
 * 3. "en" reg and bits
 *    sets the pin direction
 *    0=input 1=output
 * 4. "y" reg and bits
 *    input pin value
 *    0=low 1=high
 */
struct amdgpu_i2c_bus_rec {
	bool valid;
	/* id used by atom */
	uint8_t i2c_id;
	/* id used by atom */
	enum amdgpu_hpd_id hpd;
	/* can be used with hw i2c engine */
	bool hw_capable;
	/* uses multi-media i2c engine */
	bool mm_i2c;
	/* regs and bits */
	uint32_t mask_clk_reg;
	uint32_t mask_data_reg;
	uint32_t a_clk_reg;
	uint32_t a_data_reg;
	uint32_t en_clk_reg;
	uint32_t en_data_reg;
	uint32_t y_clk_reg;
	uint32_t y_data_reg;
	uint32_t mask_clk_mask;
	uint32_t mask_data_mask;
	uint32_t a_clk_mask;
	uint32_t a_data_mask;
	uint32_t en_clk_mask;
	uint32_t en_data_mask;
	uint32_t y_clk_mask;
	uint32_t y_data_mask;
};
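
/*
 * Example (editor's sketch): a bit-banging helper would typically grab
 * the clock pin and drive it low by combining the "mask", "en" and "a"
 * registers described above (WREG32/RREG32 being the usual amdgpu MMIO
 * accessors):
 *
 *	WREG32(rec->mask_clk_reg,
 *	       RREG32(rec->mask_clk_reg) | rec->mask_clk_mask); // hold pin
 *	WREG32(rec->en_clk_reg,
 *	       RREG32(rec->en_clk_reg) | rec->en_clk_mask);     // output
 *	WREG32(rec->a_clk_reg,
 *	       RREG32(rec->a_clk_reg) & ~rec->a_clk_mask);      // drive low
 */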

#define AMDGPU_MAX_BIOS_CONNECTOR 16

/* pll flags */
#define AMDGPU_PLL_USE_BIOS_DIVS        (1 << 0)
#define AMDGPU_PLL_NO_ODD_POST_DIV      (1 << 1)
#define AMDGPU_PLL_USE_REF_DIV          (1 << 2)
#define AMDGPU_PLL_LEGACY               (1 << 3)
#define AMDGPU_PLL_PREFER_LOW_REF_DIV   (1 << 4)
#define AMDGPU_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
#define AMDGPU_PLL_PREFER_LOW_FB_DIV    (1 << 6)
#define AMDGPU_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
#define AMDGPU_PLL_PREFER_LOW_POST_DIV  (1 << 8)
#define AMDGPU_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define AMDGPU_PLL_USE_FRAC_FB_DIV      (1 << 10)
#define AMDGPU_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define AMDGPU_PLL_USE_POST_DIV         (1 << 12)
#define AMDGPU_PLL_IS_LCD               (1 << 13)
#define AMDGPU_PLL_PREFER_MINM_OVER_MAXP (1 << 14)

struct amdgpu_pll {
	/* reference frequency */
	uint32_t reference_freq;

	/* fixed dividers */
	uint32_t reference_div;
	uint32_t post_div;

	/* pll in/out limits */
	uint32_t pll_in_min;
	uint32_t pll_in_max;
	uint32_t pll_out_min;
	uint32_t pll_out_max;
	uint32_t lcd_pll_out_min;
	uint32_t lcd_pll_out_max;
	uint32_t best_vco;

	/* divider limits */
	uint32_t min_ref_div;
	uint32_t max_ref_div;
	uint32_t min_post_div;
	uint32_t max_post_div;
	uint32_t min_feedback_div;
	uint32_t max_feedback_div;
	uint32_t min_frac_feedback_div;
	uint32_t max_frac_feedback_div;

	/* flags for the current clock */
	uint32_t flags;

	/* pll id */
	uint32_t id;
};
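
/*
 * Editor's note: these fields encode the usual PLL relation,
 * approximately (with frac_feedback_div in tenths):
 *
 *	pll_out = reference_freq * (feedback_div + frac_feedback_div / 10)
 *		  / (reference_div * post_div)
 *
 * which is what the min/max divider limits above constrain.
 */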
|
||||
|
||||
struct amdgpu_i2c_chan {
|
||||
struct i2c_adapter adapter;
|
||||
struct drm_device *dev;
|
||||
struct i2c_algo_bit_data bit;
|
||||
struct amdgpu_i2c_bus_rec rec;
|
||||
struct drm_dp_aux aux;
|
||||
bool has_aux;
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
struct amdgpu_fbdev;
|
||||
|
||||
struct amdgpu_afmt {
|
||||
bool enabled;
|
||||
int offset;
|
||||
bool last_buffer_filled_status;
|
||||
int id;
|
||||
struct amdgpu_audio_pin *pin;
|
||||
};
|
||||
|
||||
/*
|
||||
* Audio
|
||||
*/
|
||||
struct amdgpu_audio_pin {
|
||||
int channels;
|
||||
int rate;
|
||||
int bits_per_sample;
|
||||
u8 status_bits;
|
||||
u8 category_code;
|
||||
u32 offset;
|
||||
bool connected;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct amdgpu_audio {
|
||||
bool enabled;
|
||||
struct amdgpu_audio_pin pin[AMDGPU_MAX_AFMT_BLOCKS];
|
||||
int num_pins;
|
||||
};
|
||||
|
||||
struct amdgpu_mode_mc_save {
|
||||
u32 vga_render_control;
|
||||
u32 vga_hdp_control;
|
||||
bool crtc_enabled[AMDGPU_MAX_CRTCS];
|
||||
};
|
||||
|
||||
struct amdgpu_display_funcs {
|
||||
/* vga render */
|
||||
void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
|
||||
/* display watermarks */
|
||||
void (*bandwidth_update)(struct amdgpu_device *adev);
|
||||
/* get frame count */
|
||||
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
|
||||
/* wait for vblank */
|
||||
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
|
||||
/* is dce hung */
|
||||
bool (*is_display_hung)(struct amdgpu_device *adev);
|
||||
/* set backlight level */
|
||||
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
|
||||
u8 level);
|
||||
/* get backlight level */
|
||||
u8 (*backlight_get_level)(struct amdgpu_encoder *amdgpu_encoder);
|
||||
/* hotplug detect */
|
||||
bool (*hpd_sense)(struct amdgpu_device *adev, enum amdgpu_hpd_id hpd);
|
||||
void (*hpd_set_polarity)(struct amdgpu_device *adev,
|
||||
enum amdgpu_hpd_id hpd);
|
||||
u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
|
||||
/* pageflipping */
|
||||
void (*page_flip)(struct amdgpu_device *adev,
|
||||
int crtc_id, u64 crtc_base);
|
||||
int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
|
||||
u32 *vbl, u32 *position);
|
||||
/* display topology setup */
|
||||
void (*add_encoder)(struct amdgpu_device *adev,
|
||||
uint32_t encoder_enum,
|
||||
uint32_t supported_device,
|
||||
u16 caps);
|
||||
void (*add_connector)(struct amdgpu_device *adev,
|
||||
uint32_t connector_id,
|
||||
uint32_t supported_device,
|
||||
int connector_type,
|
||||
struct amdgpu_i2c_bus_rec *i2c_bus,
|
||||
uint16_t connector_object_id,
|
||||
struct amdgpu_hpd *hpd,
|
||||
struct amdgpu_router *router);
|
||||
void (*stop_mc_access)(struct amdgpu_device *adev,
|
||||
struct amdgpu_mode_mc_save *save);
|
||||
void (*resume_mc_access)(struct amdgpu_device *adev,
|
||||
struct amdgpu_mode_mc_save *save);
|
||||
};
|
||||
|
||||
struct amdgpu_mode_info {
|
||||
struct atom_context *atom_context;
|
||||
struct card_info *atom_card_info;
|
||||
bool mode_config_initialized;
|
||||
struct amdgpu_crtc *crtcs[6];
|
||||
struct amdgpu_afmt *afmt[7];
|
||||
/* DVI-I properties */
|
||||
struct drm_property *coherent_mode_property;
|
||||
/* DAC enable load detect */
|
||||
struct drm_property *load_detect_property;
|
||||
/* underscan */
|
||||
struct drm_property *underscan_property;
|
||||
	struct drm_property *underscan_hborder_property;
	struct drm_property *underscan_vborder_property;
	/* audio */
	struct drm_property *audio_property;
	/* FMT dithering */
	struct drm_property *dither_property;
	/* hardcoded DFP edid from BIOS */
	struct edid *bios_hardcoded_edid;
	int bios_hardcoded_edid_size;

	/* pointer to fbdev info structure */
	struct amdgpu_fbdev *rfbdev;
	/* firmware flags */
	u16 firmware_flags;
	/* pointer to backlight encoder */
	struct amdgpu_encoder *bl_encoder;
	struct amdgpu_audio audio; /* audio stuff */
	int num_crtc; /* number of crtcs */
	int num_hpd; /* number of hpd pins */
	int num_dig; /* number of dig blocks */
	int disp_priority;
	const struct amdgpu_display_funcs *funcs;
};

#define AMDGPU_MAX_BL_LEVEL 0xFF

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

struct amdgpu_backlight_privdata {
	struct amdgpu_encoder *encoder;
	uint8_t negative;
};

#endif

struct amdgpu_atom_ss {
	uint16_t percentage;
	uint16_t percentage_divider;
	uint8_t type;
	uint16_t step;
	uint8_t delay;
	uint8_t range;
	uint8_t refdiv;
	/* asic_ss */
	uint16_t rate;
	uint16_t amount;
};

struct amdgpu_crtc {
	struct drm_crtc base;
	int crtc_id;
	u16 lut_r[256], lut_g[256], lut_b[256];
	bool enabled;
	bool can_tile;
	uint32_t crtc_offset;
	struct drm_gem_object *cursor_bo;
	uint64_t cursor_addr;
	int cursor_width;
	int cursor_height;
	int max_cursor_width;
	int max_cursor_height;
	enum amdgpu_rmx_type rmx_type;
	u8 h_border;
	u8 v_border;
	fixed20_12 vsc;
	fixed20_12 hsc;
	struct drm_display_mode native_mode;
	u32 pll_id;
	/* page flipping */
	struct workqueue_struct *pflip_queue;
	struct amdgpu_flip_work *pflip_works;
	enum amdgpu_flip_status pflip_status;
	int deferred_flip_completion;
	/* pll sharing */
	struct amdgpu_atom_ss ss;
	bool ss_enabled;
	u32 adjusted_clock;
	int bpc;
	u32 pll_reference_div;
	u32 pll_post_div;
	u32 pll_flags;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* for dpm */
	u32 line_time;
	u32 wm_low;
	u32 wm_high;
	struct drm_display_mode hw_mode;
};

struct amdgpu_encoder_atom_dig {
	bool linkb;
	/* atom dig */
	bool coherent_mode;
	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
	/* atom lvds/edp */
	uint32_t lcd_misc;
	uint16_t panel_pwr_delay;
	uint32_t lcd_ss_id;
	/* panel mode */
	struct drm_display_mode native_mode;
	struct backlight_device *bl_dev;
	int dpms_mode;
	uint8_t backlight_level;
	int panel_mode;
	struct amdgpu_afmt *afmt;
};

struct amdgpu_encoder {
	struct drm_encoder base;
	uint32_t encoder_enum;
	uint32_t encoder_id;
	uint32_t devices;
	uint32_t active_device;
	uint32_t flags;
	uint32_t pixel_clock;
	enum amdgpu_rmx_type rmx_type;
	enum amdgpu_underscan_type underscan_type;
	uint32_t underscan_hborder;
	uint32_t underscan_vborder;
	struct drm_display_mode native_mode;
	void *enc_priv;
	int audio_polling_active;
	bool is_ext_encoder;
	u16 caps;
};

struct amdgpu_connector_atom_dig {
	/* displayport */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 dp_sink_type;
	int dp_clock;
	int dp_lane_count;
	bool edp_on;
};

struct amdgpu_gpio_rec {
	bool valid;
	u8 id;
	u32 reg;
	u32 mask;
	u32 shift;
};

struct amdgpu_hpd {
	enum amdgpu_hpd_id hpd;
	u8 plugged_state;
	struct amdgpu_gpio_rec gpio;
};

struct amdgpu_router {
	u32 router_id;
	struct amdgpu_i2c_bus_rec i2c_info;
	u8 i2c_addr;
	/* i2c mux */
	bool ddc_valid;
	u8 ddc_mux_type;
	u8 ddc_mux_control_pin;
	u8 ddc_mux_state;
	/* clock/data mux */
	bool cd_valid;
	u8 cd_mux_type;
	u8 cd_mux_control_pin;
	u8 cd_mux_state;
};

enum amdgpu_connector_audio {
	AMDGPU_AUDIO_DISABLE = 0,
	AMDGPU_AUDIO_ENABLE = 1,
	AMDGPU_AUDIO_AUTO = 2
};

enum amdgpu_connector_dither {
	AMDGPU_FMT_DITHER_DISABLE = 0,
	AMDGPU_FMT_DITHER_ENABLE = 1,
};

struct amdgpu_connector {
	struct drm_connector base;
	uint32_t connector_id;
	uint32_t devices;
	struct amdgpu_i2c_chan *ddc_bus;
	/* some systems have an hdmi and vga port with a shared ddc line */
	bool shared_ddc;
	bool use_digital;
	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;
	void *con_priv;
	bool dac_load_detect;
	bool detected_by_load; /* if the connection status was determined by load */
	uint16_t connector_object_id;
	struct amdgpu_hpd hpd;
	struct amdgpu_router router;
	struct amdgpu_i2c_chan *router_bus;
	enum amdgpu_connector_audio audio;
	enum amdgpu_connector_dither dither;
	unsigned pixelclock_for_modeset;
};

struct amdgpu_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
				((em) == ATOM_ENCODER_MODE_DP_MST))

void amdgpu_link_encoder_connector(struct drm_device *dev);

struct drm_connector *
amdgpu_get_connector_for_encoder(struct drm_encoder *encoder);
struct drm_connector *
amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder);
bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
				    u32 pixel_clock);

u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);

bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);

void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);

int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
			       unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime,
			       ktime_t *etime);

int amdgpu_framebuffer_init(struct drm_device *dev,
			    struct amdgpu_framebuffer *rfb,
			    struct drm_mode_fb_cmd2 *mode_cmd,
			    struct drm_gem_object *obj);

int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);

void amdgpu_enc_destroy(struct drm_encoder *encoder);
void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode);
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
			     struct drm_display_mode *adjusted_mode);
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);

/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
void amdgpu_fbdev_fini(struct amdgpu_device *adev);
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);

void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);

int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);

/* amdgpu_display.c */
void amdgpu_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set);
int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_pending_vblank_event *event,
			  uint32_t page_flip_flags);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;

#endif
@@ -0,0 +1,646 @@
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;
	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
	amdgpu_mn_unregister(bo);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	u32 c = 0, i;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (rbo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    rbo->adev->mc.visible_vram_size < rbo->adev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
		} else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
		} else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((rbo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
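
/*
 * Illustrative note (not part of this commit): for a BO created with
 * domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT and no special
 * creation flags, the helper above produces:
 *
 *	placements[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 *	placements[1].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 *	placement.num_placement = 2;
 *
 * so TTM tries VRAM first and falls back to cached GTT, and BOs larger
 * than 512 KB additionally get TTM_PL_FLAG_TOPDOWN on every placement.
 */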

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
	 * Do this as a temporary workaround.
	 */
	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		if (adev->asic_type >= CHIP_TOPAZ) {
			if (byte_align & 0x7fff)
				byte_align = ALIGN(byte_align, 0x8000);
			if (size & 0x7fff)
				size = ALIGN(size, 0x8000);
		}
	}

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);

	bo->flags = flags;
	amdgpu_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&adev->pm.mclk_lock);
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
	up_read(&adev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}
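
/*
 * Example usage (an illustrative sketch, not part of this commit): a
 * typical kernel-internal allocation pairs amdgpu_bo_create() with a
 * CPU mapping and releases the BO again on failure:
 *
 *	struct amdgpu_bo *bo;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, true,
 *			     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (r)
 *		amdgpu_bo_unref(&bo);
 */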

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}
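
/*
 * Example usage (an illustrative sketch, not part of this commit):
 * pinning is reference counted and expects the BO to be reserved by the
 * caller, so GPU use of a fixed address follows a balanced pattern:
 *
 *	u64 gpu_addr;
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r)
 *		return r;
 *	... program gpu_addr into the hardware ...
 *	amdgpu_bo_unpin(bo);
 */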

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMDGPU_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects)) {
		return;
	}
	dev_err(adev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		mutex_lock(&adev->ddev->struct_mutex);
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&adev->ddev->struct_mutex);
	}
}

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
		 adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

	bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK;
	bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK;
	mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK;
	tilesplit = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK;
	stilesplit = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK;
	switch (bankw) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	switch (bankh) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	switch (mtaspect) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}
	if (tilesplit > 6) {
		return -EINVAL;
	}
	if (stilesplit > 6) {
		return -EINVAL;
	}

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kzalloc(metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	memcpy(buffer, metadata, metadata_size);

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;
	rbo = container_of(bo, struct amdgpu_bo, tbo);
	adev = rbo->adev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > adev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_VRAM);
			rbo->placements[0].lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > adev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}
@@ -0,0 +1,196 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
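
/*
 * Illustrative example (not part of this commit): the current domain of a
 * BO can be recovered from its TTM placement after a move, e.g.
 *
 *	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *
 * which yields AMDGPU_GEM_DOMAIN_VRAM while the buffer lives in VRAM.
 */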

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
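
/*
 * Example usage (an illustrative sketch, not part of this commit): most
 * per-BO operations must run under the reservation, so callers bracket
 * them with amdgpu_bo_reserve()/amdgpu_bo_unreserve():
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *
 * With no_intr == false the reserve may fail with -ERESTARTSYS when a
 * signal is pending, which callers are expected to propagate.
 */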

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	return bo->tbo.offset;
}

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 max_offset, u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
void amdgpu_bo_force_delete(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
		     bool shared);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
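
/*
 * Worked example (illustrative, not part of this commit): a sub-allocation
 * is just an offset into its manager's backing BO, so for a manager whose
 * backing BO sits at GPU address 0x100000, a sa_bo with soffset 0x200 gives
 *
 *	amdgpu_sa_bo_gpu_addr(sa_bo) == 0x100000 + 0x200 == 0x100200
 *
 * and amdgpu_sa_bo_cpu_addr() applies the same offset to the CPU mapping.
 */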

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
		     struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct amdgpu_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

#endif
@@ -0,0 +1,350 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for the numerator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and apply it to both numerator and
 * denominator, but make sure numerator and denominator stay at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
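
/*
 * Worked example (illustrative, not part of this commit): for a requested
 * ratio of 270000/27000 with nom_min = 4 and den_min = 1:
 *
 *	gcd(270000, 27000) = 27000  ->  nom = 10, den = 1
 *
 * Both results already satisfy their minimums, so 10/1 is the answer.  Had
 * nom dropped below nom_min, both sides would be scaled back up by
 * DIV_ROUND_UP(nom_min, *nom) to preserve the ratio.
 */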

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}
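
/*
 * Worked example (illustrative, not part of this commit): with nom = 10,
 * den = 1, post_div = 2, fb_div_max = 1023 and ref_div_max = 128:
 *
 *	ref_div_max = min(128 / 2, 128) = 64
 *	*ref_div    = min(max(DIV_ROUND_CLOSEST(1, 2), 1), 64) = 1
 *	*fb_div     = DIV_ROUND_CLOSEST(10 * 1 * 2, 1) = 20
 *
 * which reproduces the requested ratio: fb_div / (ref_div * post_div) =
 * 20 / (1 * 2) = 10 = nom / den.
 */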

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
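
/*
 * Worked example of the dot_clock formula above (illustrative, not part of
 * this commit; the 10 kHz clock unit is an assumption based on the radeon
 * convention): with reference_freq = 2700, fb_div = 120, frac_fb_div = 0,
 * ref_div = 4 and post_div = 3:
 *
 *	dot_clock = (2700 * 120 * 10 + 2700 * 0) / (4 * 3 * 10)
 *	          = 3240000 / 120
 *	          = 27000, i.e. 270 MHz in 10 kHz units
 */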

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode. For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
@@ -0,0 +1,38 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_PLL_H__
#define __AMDGPU_PLL_H__

void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p);
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc);
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc);
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc);

#endif
@@ -0,0 +1,801 @@
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("battery", buf, strlen("battery")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		mutex_unlock(&adev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	mutex_unlock(&adev->pm.mutex);

	/* Can't set dpm state when the card is off */
	if (!(adev->flags & AMDGPU_IS_PX) ||
	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
		amdgpu_pm_compute_clocks(adev);
fail:
	return count;
}
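
/*
 * Illustrative usage from userspace (not part of this commit; the exact
 * sysfs path is an example): the handlers above back the power_dpm_state
 * device attribute registered further below, e.g.
 *
 *	# echo battery > /sys/class/drm/card0/device/power_dpm_state
 *	# cat /sys/class/drm/card0/device/power_dpm_state
 *	battery
 */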

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	mutex_lock(&adev->pm.mutex);
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else {
		count = -EINVAL;
		goto fail;
	}
	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
	}
fail:
	mutex_unlock(&adev->pm.mutex);

	return count;
}

static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int temp;

	if (adev->pm.funcs->get_temperature)
		temp = amdgpu_dpm_get_temperature(adev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (adev->pm.funcs->get_fan_control_mode)
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* scale the sysfs 0-255 range to the 0-100 percent the dpm code expects */
	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
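
/*
 * Illustrative round trip of the pwm1 scaling above (not part of this
 * commit): a userspace write of 128 becomes (128 * 100) / 255 = 50 percent
 * for the dpm code, while a fan running at 50 percent reads back as
 * (50 * 255) / 100 = 127, so values may round-trip off by one due to
 * integer division.
 */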

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};
|
||||
|
||||
static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
struct attribute *attr, int index)
|
||||
{
|
||||
struct device *dev = container_of(kobj, struct device, kobj);
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
umode_t effective_mode = attr->mode;
|
||||
|
||||
/* Skip limit attributes if DPM is not enabled */
|
||||
if (!adev->pm.dpm_enabled &&
|
||||
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
/* Skip fan attributes if fan is not present */
|
||||
if (adev->pm.no_fan &&
|
||||
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
/* mask fan attributes if we have no bindings for this asic to expose */
|
||||
if ((!adev->pm.funcs->get_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
|
||||
(!adev->pm.funcs->get_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
|
||||
effective_mode &= ~S_IRUGO;
|
||||
|
||||
if ((!adev->pm.funcs->set_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
|
||||
(!adev->pm.funcs->set_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
|
||||
effective_mode &= ~S_IWUSR;
|
||||
|
||||
/* hide max/min values if we can't both query and manage the fan */
|
||||
if ((!adev->pm.funcs->set_fan_speed_percent &&
|
||||
!adev->pm.funcs->get_fan_speed_percent) &&
|
||||
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
return effective_mode;
|
||||
}
|
||||
|
||||
static const struct attribute_group hwmon_attrgroup = {
|
||||
.attrs = hwmon_attributes,
|
||||
.is_visible = hwmon_attributes_visible,
|
||||
};
|
||||
|
||||
static const struct attribute_group *hwmon_groups[] = {
|
||||
&hwmon_attrgroup,
|
||||
NULL
|
||||
};
|
||||
|
||||
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_device *adev =
|
||||
container_of(work, struct amdgpu_device,
|
||||
pm.dpm.thermal.work);
|
||||
/* switch to the thermal state */
|
||||
enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
|
||||
|
||||
if (!adev->pm.dpm_enabled)
|
||||
return;
|
||||
|
||||
if (adev->pm.funcs->get_temperature) {
|
||||
int temp = amdgpu_dpm_get_temperature(adev);
|
||||
|
||||
if (temp < adev->pm.dpm.thermal.min_temp)
|
||||
/* switch back the user state */
|
||||
dpm_state = adev->pm.dpm.user_state;
|
||||
} else {
|
||||
if (adev->pm.dpm.thermal.high_to_low)
|
||||
/* switch back the user state */
|
||||
dpm_state = adev->pm.dpm.user_state;
|
||||
}
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
|
||||
adev->pm.dpm.thermal_active = true;
|
||||
else
|
||||
adev->pm.dpm.thermal_active = false;
|
||||
adev->pm.dpm.state = dpm_state;
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
|
||||
amdgpu_pm_compute_clocks(adev);
|
||||
}
|
||||
|
||||
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
|
||||
enum amdgpu_pm_state_type dpm_state)
|
||||
{
|
||||
int i;
|
||||
struct amdgpu_ps *ps;
|
||||
u32 ui_class;
|
||||
bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
|
||||
true : false;
|
||||
|
||||
/* check if the vblank period is too short to adjust the mclk */
|
||||
if (single_display && adev->pm.funcs->vblank_too_short) {
|
||||
if (amdgpu_dpm_vblank_too_short(adev))
|
||||
single_display = false;
|
||||
}
|
||||
|
||||
/* certain older asics have a separare 3D performance state,
|
||||
* so try that first if the user selected performance
|
||||
*/
|
||||
if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
|
||||
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
|
||||
/* balanced states don't exist at the moment */
|
||||
if (dpm_state == POWER_STATE_TYPE_BALANCED)
|
||||
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
|
||||
|
||||
restart_search:
|
||||
/* Pick the best power state based on current conditions */
|
||||
for (i = 0; i < adev->pm.dpm.num_ps; i++) {
|
||||
ps = &adev->pm.dpm.ps[i];
|
||||
ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
|
||||
switch (dpm_state) {
|
||||
/* user states */
|
||||
case POWER_STATE_TYPE_BATTERY:
|
||||
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
|
||||
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
|
||||
if (single_display)
|
||||
return ps;
|
||||
} else
|
||||
return ps;
|
||||
}
|
||||
break;
|
||||
case POWER_STATE_TYPE_BALANCED:
|
||||
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
|
||||
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
|
||||
if (single_display)
|
||||
return ps;
|
||||
} else
|
||||
return ps;
|
||||
}
|
||||
break;
|
||||
case POWER_STATE_TYPE_PERFORMANCE:
|
||||
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
|
||||
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
|
||||
if (single_display)
|
||||
return ps;
|
||||
} else
|
||||
return ps;
|
||||
}
|
||||
break;
|
||||
/* internal states */
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD:
|
||||
if (adev->pm.dpm.uvd_ps)
|
||||
return adev->pm.dpm.uvd_ps;
|
||||
else
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
|
||||
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_BOOT:
|
||||
return adev->pm.dpm.boot_ps;
|
||||
case POWER_STATE_TYPE_INTERNAL_THERMAL:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_ACPI:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_ULV:
|
||||
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
|
||||
return ps;
|
||||
break;
|
||||
case POWER_STATE_TYPE_INTERNAL_3DPERF:
|
||||
if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
|
||||
return ps;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* use a fallback state if we didn't match */
|
||||
switch (dpm_state) {
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
|
||||
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
|
||||
goto restart_search;
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
|
||||
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
|
||||
if (adev->pm.dpm.uvd_ps) {
|
||||
return adev->pm.dpm.uvd_ps;
|
||||
} else {
|
||||
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
|
||||
goto restart_search;
|
||||
}
|
||||
case POWER_STATE_TYPE_INTERNAL_THERMAL:
|
||||
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
|
||||
goto restart_search;
|
||||
case POWER_STATE_TYPE_INTERNAL_ACPI:
|
||||
dpm_state = POWER_STATE_TYPE_BATTERY;
|
||||
goto restart_search;
|
||||
case POWER_STATE_TYPE_BATTERY:
|
||||
case POWER_STATE_TYPE_BALANCED:
|
||||
case POWER_STATE_TYPE_INTERNAL_3DPERF:
|
||||
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
|
||||
goto restart_search;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
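
/* Summary of the fallback chain implemented by the switch above (an
 * editorial illustration derived from the code, not extra behaviour):
 * UVD_SD falls back to UVD_HD; UVD_HD/HD2/MVC fall back to the cached
 * uvd_ps or to PERFORMANCE; THERMAL -> ACPI -> BATTERY -> PERFORMANCE.
 * Every search therefore ends in a UI performance state, or returns
 * NULL when even that class is absent from the power-state table.
 */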
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amdgpu_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMDGPU_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * there is nothing to do; if the num crtcs is > 1 and state is
			 * the same, update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
				return;
			}
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	mutex_lock(&adev->ddev->struct_mutex);
	down_write(&adev->pm.mclk_lock);
	mutex_lock(&adev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);
	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&adev->ring_lock);
	up_write(&adev->pm.mclk_lock);
	mutex_unlock(&adev->ddev->struct_mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pm.funcs->powergate_uvd) {
		mutex_lock(&adev->pm.mutex);
		/* enable/disable UVD */
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}

		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (enable) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
		mutex_unlock(&adev->pm.mutex);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.vce_active = false;
		mutex_unlock(&adev->pm.mutex);
	}

	amdgpu_pm_compute_clocks(adev);
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
	}
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.funcs->get_temperature == NULL)
		return 0;
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm forced performance level\n");
		return ret;
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
}

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	mutex_lock(&adev->pm.mutex);

	/* update active crtc counts */
	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		adev->pm.dpm.ac_power = true;
	else
		adev->pm.dpm.ac_power = false;

	amdgpu_dpm_change_power_state_locked(adev);

	mutex_unlock(&adev->pm.mutex);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}

@ -0,0 +1,35 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_PM_H__
#define __AMDGPU_PM_H__

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);

#endif

@ -0,0 +1,125 @@
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&bo->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return &bo->gem_base;
}

int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	amdgpu_bo_unreserve(bo);
	return ret;
}

void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}

struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}

@ -0,0 +1,561 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);

/**
 * amdgpu_ring_free_size - update the free size
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void amdgpu_ring_free_size(struct amdgpu_ring *ring)
{
	uint32_t rptr = amdgpu_ring_get_rptr(ring);

	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = rptr + (ring->ring_size / 4);
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		/* this is an empty ring */
		ring->ring_free_dw = ring->ring_size / 4;
		/* update lockup info to avoid false positive */
		amdgpu_ring_lockup_update(ring);
	}
}
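
/* Worked example of the free-size arithmetic above (hypothetical
 * numbers): a 4096-byte ring holds 1024 dwords, so ptr_mask = 1023.
 * With rptr = 100 and wptr = 900, (100 + 1024 - 900) & 1023 = 224 free
 * dwords.  A masked result of 0 corresponds to rptr == wptr, which the
 * driver treats as a completely empty ring.
 */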
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	amdgpu_ring_free_size(ring);
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		amdgpu_ring_free_size(ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = amdgpu_fence_wait_next(ring);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_lock - lock the ring and allocate space on it
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(ring->ring_lock);
	r = amdgpu_ring_alloc(ring, ndw);
	if (r) {
		mutex_unlock(ring->ring_lock);
		return r;
	}
	return 0;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		amdgpu_ring_write(ring, ring->nop);
	}
	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Call amdgpu_ring_commit() then unlock the ring (all asics).
 */
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
{
	amdgpu_ring_commit(ring);
	mutex_unlock(ring->ring_lock);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Call amdgpu_ring_undo() then unlock the ring (all asics).
 */
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
{
	amdgpu_ring_undo(ring);
	mutex_unlock(ring->ring_lock);
}

/**
 * amdgpu_ring_lockup_update - update lockup variables
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
{
	atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
	atomic64_set(&ring->last_activity, jiffies_64);
}

/**
 * amdgpu_ring_test_lockup() - check if the ring is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check whether the ring has made progress by comparing the current
 * rptr against the last recorded one (all asics).
 */
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
{
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	uint64_t last = atomic64_read(&ring->last_activity);
	uint64_t elapsed;

	if (rptr != atomic_read(&ring->last_rptr)) {
		/* ring is still working, no lockup */
		amdgpu_ring_lockup_update(ring);
		return false;
	}

	elapsed = jiffies_to_msecs(jiffies_64 - last);
	if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
		dev_err(ring->adev->dev, "ring %d stalled for more than %llu msec\n",
			ring->idx, elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
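
/* Illustrative timing for the check above (the timeout comes from the
 * amdgpu_lockup_timeout module parameter, in msec; 10000 here is only
 * an example value): if the rptr has not moved and more than 10
 * seconds of jiffies time have elapsed since last_activity, the ring
 * is reported as stalled; any rptr progress in between re-arms the
 * detector via amdgpu_ring_lockup_update().
 */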
/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	mutex_lock(ring->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		mutex_unlock(ring->ring_lock);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring)) {
		mutex_unlock(ring->ring_lock);
		return 0;
	}

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		mutex_unlock(ring->ring_lock);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data) {
		mutex_unlock(ring->ring_lock);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	mutex_unlock(ring->ring_lock);
	return size;
}
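
/* Worked example of the size computation in amdgpu_ring_backup() above
 * (hypothetical values): on a 1024-dword ring (ptr_mask = 1023) with
 * next_rptr ptr = 1000 and wptr = 50, size = (50 + 1024 - 1000) & 1023
 * = 74 unprocessed dwords, and the copy loop wraps from the end of the
 * ring buffer back to the beginning while saving them.
 */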
/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restore the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_lock(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		amdgpu_ring_write(ring, data[i]);
	}

	amdgpu_ring_unlock_commit(ring);
	kfree(data);
	return 0;
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	u32 rb_bufsz;
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		amdgpu_fence_driver_init_ring(ring);
	}

	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_lock = &adev->ring_lock;
	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	amdgpu_ring_lockup_update(ring);
	return 0;
}
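
/* Worked example of the "Align ring size" step above (hypothetical
 * request): for ring_size = 6000, order_base_2(6000 / 8) = 10, so the
 * ring is rounded up to (1 << 11) * 4 = 8192 bytes.  Rounding to a
 * power-of-two dword count is what makes the ptr_mask arithmetic used
 * throughout this file valid.
 */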
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	if (ring->ring_lock == NULL)
		return;

	mutex_lock(ring->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(ring->ring_lock);

	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = *(int *)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t *)adev) + roffset);

	uint32_t rptr, wptr, rptr_next;
	unsigned count, i, j;

	amdgpu_ring_free_size(ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n",
		   wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	seq_printf(m, "rptr: 0x%08x [%5d]\n",
		   rptr, rptr);

	rptr_next = ~0;

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
		   ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
		   ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);

	if (!ring->ready)
		return 0;

	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (rptr == i)
			seq_puts(m, " *");
		if (rptr_next == i)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

/* TODO: clean this up! */
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);

static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
		int roffset = *(int *)amdgpu_debugfs_ring_info_list[i].data;
		struct amdgpu_ring *other = (void *)(((uint8_t *)adev) + roffset);
		unsigned r;

		if (other != ring)
			continue;

		r = amdgpu_debugfs_add_files(adev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

@ -0,0 +1,419 @@
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, what comes after the last bo is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if such a one exists.  If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and just wait for any of those fences to complete.
 */
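
/* Editorial walk-through of the scheme above (hypothetical offsets, not
 * a real trace): in a 1 MiB manager holding live bos [0, 64K) fenced on
 * ring 0 and [64K, 128K) fenced on ring 1, "hole" sits behind the
 * second bo, so the next allocation is tried at 128K.  Once the fence
 * for [0, 64K) signals, that range is reclaimed and the hole can wrap
 * back to offset 0 when the tail of the buffer fills up.
 */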
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true,
			     domain, 0, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	amdgpu_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}
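
/* Worked example of the alignment handling above (hypothetical hole):
 * with soffset = 100, eoffset = 400 and align = 64, wasted =
 * (64 - (100 % 64)) % 64 = 28, so a 256-byte request fits
 * (400 - 100 = 300 >= 256 + 28) and lands at the aligned soffset 128.
 */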
/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!amdgpu_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring->idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
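
/* Worked example of the wrap-around distance above (hypothetical
 * offsets, sa_manager->size = 1024): with the hole at soffset = 900 and
 * a signaled bo at soffset = 100, tmp = 100 + 1024 - 900 = 224, so bos
 * that wrapped past the end of the buffer still compare correctly
 * against candidates lying between the hole and the end.
 */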
|
||||
|
||||
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
|
||||
struct amdgpu_sa_manager *sa_manager,
|
||||
struct amdgpu_sa_bo **sa_bo,
|
||||
unsigned size, unsigned align)
|
||||
{
|
||||
struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
|
||||
unsigned tries[AMDGPU_MAX_RINGS];
|
||||
int i, r;
|
||||
|
||||
BUG_ON(align > sa_manager->align);
|
||||
BUG_ON(size > sa_manager->size);
|
||||
|
||||
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
|
||||
if ((*sa_bo) == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
(*sa_bo)->manager = sa_manager;
|
||||
(*sa_bo)->fence = NULL;
|
||||
INIT_LIST_HEAD(&(*sa_bo)->olist);
|
||||
INIT_LIST_HEAD(&(*sa_bo)->flist);
|
||||
|
||||
spin_lock(&sa_manager->wq.lock);
|
||||
do {
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
fences[i] = NULL;
|
||||
tries[i] = 0;
|
||||
}
|
||||
|
||||
do {
|
||||
amdgpu_sa_bo_try_free(sa_manager);
|
||||
|
||||
if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
|
||||
size, align)) {
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* see if we can skip over some allocations */
|
||||
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
|
||||
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
r = amdgpu_fence_wait_any(adev, fences, false);
|
||||
spin_lock(&sa_manager->wq.lock);
|
||||
/* if we have nothing to wait for block */
|
||||
if (r == -ENOENT) {
|
||||
r = wait_event_interruptible_locked(
|
||||
sa_manager->wq,
|
||||
amdgpu_sa_event(sa_manager, size, align)
|
||||
);
|
||||
}
|
||||
|
||||
} while (!r);
|
||||
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
kfree(*sa_bo);
|
||||
*sa_bo = NULL;
|
||||
return r;
|
||||
}
|
||||
|
||||
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
|
||||
struct amdgpu_fence *fence)
|
||||
{
|
||||
struct amdgpu_sa_manager *sa_manager;
|
||||
|
||||
if (sa_bo == NULL || *sa_bo == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
sa_manager = (*sa_bo)->manager;
|
||||
spin_lock(&sa_manager->wq.lock);
|
||||
if (fence && !amdgpu_fence_signaled(fence)) {
|
||||
(*sa_bo)->fence = amdgpu_fence_ref(fence);
|
||||
list_add_tail(&(*sa_bo)->flist,
|
||||
&sa_manager->flist[fence->ring->idx]);
|
||||
} else {
|
||||
amdgpu_sa_bo_remove_locked(*sa_bo);
|
||||
}
|
||||
wake_up_all_locked(&sa_manager->wq);
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
*sa_bo = NULL;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
|
||||
struct seq_file *m)
|
||||
{
|
||||
struct amdgpu_sa_bo *i;
|
||||
|
||||
spin_lock(&sa_manager->wq.lock);
|
||||
list_for_each_entry(i, &sa_manager->olist, olist) {
|
||||
uint64_t soffset = i->soffset + sa_manager->gpu_addr;
|
||||
uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
|
||||
if (&i->olist == sa_manager->hole) {
|
||||
seq_printf(m, ">");
|
||||
} else {
|
||||
seq_printf(m, " ");
|
||||
}
|
||||
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
|
||||
soffset, eoffset, eoffset - soffset);
|
||||
if (i->fence) {
|
||||
seq_printf(m, " protected by 0x%016llx on ring %d",
|
||||
i->fence->seq, i->fence->ring->idx);
|
||||
}
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
spin_unlock(&sa_manager->wq.lock);
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,102 @@
|
|||
/*
 * Copyright 2011 Christian König.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore)
{
	int r;

	*semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
	if (*semaphore == NULL) {
		return -ENOMEM;
	}
	r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
			     &(*semaphore)->sa_bo, 8, 8);
	if (r) {
		kfree(*semaphore);
		*semaphore = NULL;
		return r;
	}
	(*semaphore)->waiters = 0;
	(*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);

	*((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;

	return 0;
}

bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore)
{
	trace_amdgpu_semaphore_signale(ring->idx, semaphore);

	if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
		--semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}

bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore)
{
	trace_amdgpu_semaphore_wait(ring->idx, semaphore);

	if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
		++semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}

void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct amdgpu_fence *fence)
{
	if (semaphore == NULL || *semaphore == NULL) {
		return;
	}
	if ((*semaphore)->waiters > 0) {
		dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
			" hardware lockup imminent!\n", *semaphore);
	}
	amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
	kfree(*semaphore);
	*semaphore = NULL;
}
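
Editor's note: the intended pairing is one emit_wait on the consuming ring matched by one emit_signal on the producing ring, with each emission bracketed by ring lock/commit. A minimal sketch (hypothetical helper, error paths simplified; amdgpu_sync_rings() in the next file and amdgpu_test_ring_sync() later in this patch do this for real):

/*
 * Sketch only: make ringA stall until ringB signals the semaphore.
 */
static int example_cross_ring_sync(struct amdgpu_device *adev,
				   struct amdgpu_ring *ringA,
				   struct amdgpu_ring *ringB)
{
	struct amdgpu_semaphore *sem;
	int r;

	r = amdgpu_semaphore_create(adev, &sem);
	if (r)
		return r;

	r = amdgpu_ring_lock(ringA, 64);	/* consumer waits */
	if (r)
		goto free;
	amdgpu_semaphore_emit_wait(ringA, sem);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_ring_lock(ringB, 64);	/* producer signals */
	if (r)
		goto free;
	amdgpu_semaphore_emit_signal(ringB, sem);
	amdgpu_ring_unlock_commit(ringB);

free:
	/* NULL fence here only because this sketch emits no fence */
	amdgpu_semaphore_free(adev, &sem, NULL);
	return r;
}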
@@ -0,0 +1,231 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_fence - use the semaphore to sync to a fence
 *
 * @sync: sync object to add fence to
 * @fence: fence to sync to
 *
 * Sync to the fence using the semaphore objects
 */
void amdgpu_sync_fence(struct amdgpu_sync *sync,
		       struct amdgpu_fence *fence)
{
	struct amdgpu_fence *other;

	if (!fence)
		return;

	/* only the latest fence per ring needs to be remembered */
	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
		amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
		other = sync->last_vm_update;
		sync->last_vm_update = amdgpu_fence_ref(
			amdgpu_fence_later(fence, other));
		amdgpu_fence_unref(&other);
	}
}

/**
 * amdgpu_sync_resv - use the semaphores to sync to a reservation object
 *
 * @adev: amdgpu device pointer
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: fences belonging to @owner are skipped (unless the owner is
 *	   AMDGPU_FENCE_OWNER_UNDEFINED)
 *
 * Sync to the fence using the semaphore objects
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	struct amdgpu_fence *fence;
	unsigned i;
	int r = 0;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	fence = f ? to_amdgpu_fence(f) : NULL;
	if (fence && fence->ring->adev == adev)
		amdgpu_sync_fence(sync, fence);
	else if (f)
		r = fence_wait(f, true);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		fence = to_amdgpu_fence(f);
		if (fence && fence->ring->adev == adev) {
			if (fence->owner != owner ||
			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
				amdgpu_sync_fence(sync, fence);
		} else {
			/* foreign fences can only be waited for on the CPU */
			r = fence_wait(f, true);
			if (r)
				break;
		}
	}
	return r;
}

/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (count >= AMDGPU_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}
		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = amdgpu_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by freeing all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct amdgpu_fence *fence)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		amdgpu_fence_unref(&sync->sync_to[i]);

	amdgpu_fence_unref(&sync->last_vm_update);
}
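
Editor's note: for orientation, this is the expected life cycle of a sync object around a command submission, as a sketch (hypothetical helper; the caller is assumed to hold the ring lock across amdgpu_sync_rings(), as its comment requires):

/*
 * Sketch only: collect dependencies, emit the waits, then free the
 * semaphores against the submission's own fence.
 */
static int example_sync_lifecycle(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct reservation_object *resv,
				  struct amdgpu_fence *fence)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* gather everything this submission must wait for */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto free;

	/* emit semaphore waits (or fall back to CPU waits) on @ring */
	r = amdgpu_sync_rings(&sync, ring);

free:
	/* semaphores are recycled once @fence signals */
	amdgpu_sync_free(adev, &sync, fence);
	return r;
}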
@@ -0,0 +1,552 @@
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct amdgpu_fence *fence = NULL;

		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		/* fill the GTT buffer with its own CPU addresses */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - adev->mc.gtt_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			amdgpu_fence_unref(&fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}

static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
					     struct amdgpu_ring *ring,
					     struct amdgpu_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring == &adev->uvd.ring) {
		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring == &adev->vce.ring[0] ||
		   ring == &adev->vce.ring[1]) {
		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = amdgpu_ring_lock(ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		amdgpu_ring_unlock_commit(ring);
	}
	return 0;
}

void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *ringA,
			   struct amdgpu_ring *ringB)
{
	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	int r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (amdgpu_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fence1)
		amdgpu_fence_unref(&fence1);

	if (fence2)
		amdgpu_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
				   struct amdgpu_ring *ringA,
				   struct amdgpu_ring *ringB,
				   struct amdgpu_ring *ringC)
{
	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);
	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (amdgpu_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = amdgpu_fence_signaled(fenceA);
		sigB = amdgpu_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	mdelay(1000);

	r = amdgpu_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = amdgpu_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fenceA)
		amdgpu_fence_unref(&fenceA);

	if (fenceB)
		amdgpu_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
				      struct amdgpu_ring *ringB)
{
	if (ringA == &ringA->adev->vce.ring[0] &&
	    ringB == &ringB->adev->vce.ring[1])
		return false;

	return true;
}

void amdgpu_test_syncing(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ringA = adev->rings[i];
		if (!ringA || !ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct amdgpu_ring *ringB = adev->rings[j];
			if (!ringB || !ringB->ready)
				continue;

			if (!amdgpu_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			amdgpu_test_ring_sync(adev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			amdgpu_test_ring_sync(adev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct amdgpu_ring *ringC = adev->rings[k];
				if (!ringC || !ringC->ready)
					continue;

				if (!amdgpu_test_sync_possible(ringA, ringC))
					continue;

				if (!amdgpu_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
			}
		}
	}
}
@@ -0,0 +1,209 @@
#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE amdgpu_trace

TRACE_EVENT(amdgpu_bo_create,
	    TP_PROTO(struct amdgpu_bo *bo),
	    TP_ARGS(bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u32, pages)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->pages = bo->tbo.num_pages;
			   ),
	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);

TRACE_EVENT(amdgpu_cs,
	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
	    TP_ARGS(p, i),
	    TP_STRUCT__entry(
			     __field(u32, ring)
			     __field(u32, dw)
			     __field(u32, fences)
			     ),

	    TP_fast_assign(
			   __entry->ring = p->ibs[i].ring->idx;
			   __entry->dw = p->ibs[i].length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
				p->ibs[i].ring);
			   ),
	    TP_printk("ring=%u, dw=%u, fences=%u",
		      __entry->ring, __entry->dw,
		      __entry->fences)
);

TRACE_EVENT(amdgpu_vm_grab_id,
	    TP_PROTO(unsigned vmid, int ring),
	    TP_ARGS(vmid, ring),
	    TP_STRUCT__entry(
			     __field(u32, vmid)
			     __field(u32, ring)
			     ),

	    TP_fast_assign(
			   __entry->vmid = vmid;
			   __entry->ring = ring;
			   ),
	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);

TRACE_EVENT(amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->it.start;
			   __entry->eoffset = mapping->it.last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);

TRACE_EVENT(amdgpu_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
	    TP_ARGS(pe, addr, count, incr, flags),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
		      __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count)
);

TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
	    TP_ARGS(pd_addr, ring, id),
	    TP_STRUCT__entry(
			     __field(u64, pd_addr)
			     __field(u32, ring)
			     __field(u32, id)
			     ),

	    TP_fast_assign(
			   __entry->pd_addr = pd_addr;
			   __entry->ring = ring;
			   __entry->id = id;
			   ),
	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
		      __entry->pd_addr, __entry->ring, __entry->id)
);

DECLARE_EVENT_CLASS(amdgpu_fence_request,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(int, ring)
			     __field(u32, seqno)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   __entry->ring = ring;
			   __entry->seqno = seqno;
			   ),

	    TP_printk("dev=%u, ring=%d, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);

DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,

	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),

	    TP_ARGS(dev, ring, seqno)
);

DECLARE_EVENT_CLASS(amdgpu_semaphore_request,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem),

	    TP_STRUCT__entry(
			     __field(int, ring)
			     __field(signed, waiters)
			     __field(uint64_t, gpu_addr)
			     ),

	    TP_fast_assign(
			   __entry->ring = ring;
			   __entry->waiters = sem->waiters;
			   __entry->gpu_addr = sem->gpu_addr;
			   ),

	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
		      __entry->waiters, __entry->gpu_addr)
);

DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem)
);

DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,

	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),

	    TP_ARGS(ring, sem)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
@@ -0,0 +1,9 @@
/* Copyright Red Hat Inc 2010.
 * Author : Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define CREATE_TRACE_POINTS
#include "amdgpu_trace.h"
File diff suppressed because it is too large
@@ -0,0 +1,317 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"

static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
		  le32_to_cpu(hdr->ucode_array_offset_bytes));
	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}

void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("MC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct mc_firmware_header_v1_0 *mc_hdr =
			container_of(hdr, struct mc_firmware_header_v1_0, header);

		DRM_DEBUG("io_debug_size_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SMC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct smc_firmware_header_v1_0 *smc_hdr =
			container_of(hdr, struct smc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
	} else {
		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GFX\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gfx_firmware_header_v1_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
	} else {
		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("RLC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct rlc_firmware_header_v1_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("master_pkt_description_offset: %u\n",
			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
	} else if (version_major == 2) {
		const struct rlc_firmware_header_v2_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("reg_restore_list_size: %u\n",
			  le32_to_cpu(rlc_hdr->reg_restore_list_size));
		DRM_DEBUG("reg_list_format_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_start));
		DRM_DEBUG("reg_list_format_separate_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
		DRM_DEBUG("starting_offsets_start: %u\n",
			  le32_to_cpu(rlc_hdr->starting_offsets_start));
		DRM_DEBUG("reg_list_format_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
		DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		DRM_DEBUG("reg_list_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
		DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
		DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SDMA\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct sdma_firmware_header_v1_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ucode_change_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_change_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
		if (version_minor >= 1) {
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
		}
	} else {
		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}

int amdgpu_ucode_validate(const struct firmware *fw)
{
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	if (fw->size == le32_to_cpu(hdr->size_bytes))
		return 0;

	return -EINVAL;
}

bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
			      uint16_t hdr_major, uint16_t hdr_minor)
{
	if ((hdr->common.header_version_major == hdr_major) &&
	    (hdr->common.header_version_minor == hdr_minor))
		return false;
	return true;
}

static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
				       uint64_t mc_addr, void *kptr)
{
	const struct common_firmware_header *header = NULL;

	if (NULL == ucode->fw)
		return 0;

	ucode->mc_addr = mc_addr;
	ucode->kaddr = kptr;

	header = (const struct common_firmware_header *)ucode->fw->data;
	memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
		le32_to_cpu(header->ucode_array_offset_bytes)),
		le32_to_cpu(header->ucode_size_bytes));

	return 0;
}

int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
	uint64_t fw_mc_addr;
	void *fw_buf_ptr = NULL;
	uint64_t fw_offset = 0;
	int i, err;
	struct amdgpu_firmware_info *ucode = NULL;
	const struct common_firmware_header *header = NULL;

	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
		err = -ENOMEM;
		goto failed;
	}

	err = amdgpu_bo_reserve(*bo, false);
	if (err) {
		amdgpu_bo_unref(bo);
		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
		goto failed;
	}

	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
	if (err) {
		amdgpu_bo_unreserve(*bo);
		amdgpu_bo_unref(bo);
		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
		goto failed;
	}

	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
	if (err) {
		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
		amdgpu_bo_unref(bo);
		goto failed;
	}

	amdgpu_bo_unreserve(*bo);

	fw_offset = 0;
	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			header = (const struct common_firmware_header *)ucode->fw->data;
			amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
						    fw_buf_ptr + fw_offset);
			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

failed:
	if (err)
		adev->firmware.smu_load = false;

	return err;
}

int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_firmware_info *ucode = NULL;

	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			ucode->mc_addr = 0;
			ucode->kaddr = NULL;
		}
	}
	amdgpu_bo_unref(&adev->firmware.fw_buf);
	adev->firmware.fw_buf = NULL;

	return 0;
}
@@ -0,0 +1,176 @@
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_UCODE_H__
#define __AMDGPU_UCODE_H__

struct common_firmware_header {
	uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
	uint32_t header_size_bytes; /* size of just the header in bytes */
	uint16_t header_version_major; /* header version */
	uint16_t header_version_minor; /* header version */
	uint16_t ip_version_major; /* IP version */
	uint16_t ip_version_minor; /* IP version */
	uint32_t ucode_version;
	uint32_t ucode_size_bytes; /* size of ucode in bytes */
	uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
	uint32_t crc32; /* crc32 checksum of the payload */
};

/* version_major=1, version_minor=0 */
struct mc_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint32_t io_debug_size_bytes; /* size of debug array in dwords */
	uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
};

/* version_major=1, version_minor=0 */
struct smc_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint32_t ucode_start_addr;
};

/* version_major=1, version_minor=0 */
struct gfx_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint32_t ucode_feature_version;
	uint32_t jt_offset; /* jt location */
	uint32_t jt_size;  /* size of jt */
};

/* version_major=1, version_minor=0 */
struct rlc_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint32_t ucode_feature_version;
	uint32_t save_and_restore_offset;
	uint32_t clear_state_descriptor_offset;
	uint32_t avail_scratch_ram_locations;
	uint32_t master_pkt_description_offset;
};

/* version_major=2, version_minor=0 */
struct rlc_firmware_header_v2_0 {
	struct common_firmware_header header;
	uint32_t ucode_feature_version;
	uint32_t jt_offset; /* jt location */
	uint32_t jt_size;  /* size of jt */
	uint32_t save_and_restore_offset;
	uint32_t clear_state_descriptor_offset;
	uint32_t avail_scratch_ram_locations;
	uint32_t reg_restore_list_size;
	uint32_t reg_list_format_start;
	uint32_t reg_list_format_separate_start;
	uint32_t starting_offsets_start;
	uint32_t reg_list_format_size_bytes; /* size of reg list format array in bytes */
	uint32_t reg_list_format_array_offset_bytes; /* payload offset from the start of the header */
	uint32_t reg_list_size_bytes; /* size of reg list array in bytes */
	uint32_t reg_list_array_offset_bytes; /* payload offset from the start of the header */
	uint32_t reg_list_format_separate_size_bytes; /* size of reg list format array in bytes */
	uint32_t reg_list_format_separate_array_offset_bytes; /* payload offset from the start of the header */
	uint32_t reg_list_separate_size_bytes; /* size of reg list array in bytes */
	uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
};

/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint32_t ucode_feature_version;
	uint32_t ucode_change_version;
	uint32_t jt_offset; /* jt location */
	uint32_t jt_size; /* size of jt */
};

/* version_major=1, version_minor=1 */
struct sdma_firmware_header_v1_1 {
	struct sdma_firmware_header_v1_0 v1_0;
	uint32_t digest_size;
};

/* header is fixed size */
union amdgpu_firmware_header {
	struct common_firmware_header common;
	struct mc_firmware_header_v1_0 mc;
	struct smc_firmware_header_v1_0 smc;
	struct gfx_firmware_header_v1_0 gfx;
	struct rlc_firmware_header_v1_0 rlc;
	struct rlc_firmware_header_v2_0 rlc_v2_0;
	struct sdma_firmware_header_v1_0 sdma;
	struct sdma_firmware_header_v1_1 sdma_v1_1;
	uint8_t raw[0x100];
};

/*
 * fw loading support
 */
enum AMDGPU_UCODE_ID {
	AMDGPU_UCODE_ID_SDMA0 = 0,
	AMDGPU_UCODE_ID_SDMA1,
	AMDGPU_UCODE_ID_CP_CE,
	AMDGPU_UCODE_ID_CP_PFP,
	AMDGPU_UCODE_ID_CP_ME,
	AMDGPU_UCODE_ID_CP_MEC1,
	AMDGPU_UCODE_ID_CP_MEC2,
	AMDGPU_UCODE_ID_RLC_G,
	AMDGPU_UCODE_ID_MAXIMUM,
};

/* engine firmware status */
enum AMDGPU_UCODE_STATUS {
	AMDGPU_UCODE_STATUS_INVALID,
	AMDGPU_UCODE_STATUS_NOT_LOADED,
	AMDGPU_UCODE_STATUS_LOADED,
};

/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED	0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED	0x00000002
#define AMDGPU_CPCE_UCODE_LOADED	0x00000004
#define AMDGPU_CPPFP_UCODE_LOADED	0x00000008
#define AMDGPU_CPME_UCODE_LOADED	0x00000010
#define AMDGPU_CPMEC1_UCODE_LOADED	0x00000020
#define AMDGPU_CPMEC2_UCODE_LOADED	0x00000040
#define AMDGPU_CPRLC_UCODE_LOADED	0x00000100

/* amdgpu firmware info */
struct amdgpu_firmware_info {
	/* ucode ID */
	enum AMDGPU_UCODE_ID ucode_id;
	/* request_firmware */
	const struct firmware *fw;
	/* starting mc address */
	uint64_t mc_addr;
	/* kernel linear address */
	void *kaddr;
};

void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
int amdgpu_ucode_validate(const struct firmware *fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
			      uint16_t hdr_major, uint16_t hdr_minor);
int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);

#endif
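
Editor's note: because every header starts with the common block, callers can read a blob through the fixed-size union and only trust version-specific fields after checking the version, as amdgpu_ucode_print_rlc_hdr() does. A sketch:

/*
 * Sketch only: access version-specific fields through the union.
 */
static void example_parse_header(const struct firmware *fw)
{
	const union amdgpu_firmware_header *hdr =
		(const union amdgpu_firmware_header *)fw->data;

	/* the v2_0 layout is only valid after the version check */
	if (le16_to_cpu(hdr->common.header_version_major) == 2)
		DRM_DEBUG("RLC v2 jt_size: %u\n",
			  le32_to_cpu(hdr->rlc_v2_0.jt_size));
}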
@@ -0,0 +1,976 @@
/*
|
||||
* Copyright 2011 Advanced Micro Devices, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Christian König <deathsimple@vodafone.de>
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/module.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_pm.h"
|
||||
#include "amdgpu_uvd.h"
|
||||
#include "cikd.h"
|
||||
#include "uvd/uvd_4_2_d.h"
|
||||
|
||||
/* 1 second timeout */
|
||||
#define UVD_IDLE_TIMEOUT_MS 1000
|
||||
|
||||
/* Firmware Names */
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
|
||||
#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
|
||||
#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
|
||||
#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
|
||||
#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
|
||||
#endif
|
||||
#define FIRMWARE_TONGA "radeon/tonga_uvd.bin"
|
||||
#define FIRMWARE_CARRIZO "radeon/carrizo_uvd.bin"
|
||||
|
||||
/**
|
||||
* amdgpu_uvd_cs_ctx - Command submission parser context
|
||||
*
|
||||
* Used for emulating virtual memory support on UVD 4.2.
|
||||
*/
|
||||
struct amdgpu_uvd_cs_ctx {
|
||||
struct amdgpu_cs_parser *parser;
|
||||
unsigned reg, count;
|
||||
unsigned data0, data1;
|
||||
unsigned idx;
|
||||
unsigned ib_idx;
|
||||
|
||||
/* does the IB has a msg command */
|
||||
bool has_msg_cmd;
|
||||
|
||||
/* minimum buffer sizes */
|
||||
unsigned *buf_sizes;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
|
||||
MODULE_FIRMWARE(FIRMWARE_KABINI);
|
||||
MODULE_FIRMWARE(FIRMWARE_KAVERI);
|
||||
MODULE_FIRMWARE(FIRMWARE_HAWAII);
|
||||
MODULE_FIRMWARE(FIRMWARE_MULLINS);
|
||||
#endif
|
||||
MODULE_FIRMWARE(FIRMWARE_TONGA);
|
||||
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
|
||||
|
||||
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
|
||||
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
|
||||
|
||||
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned long bo_size;
|
||||
const char *fw_name;
|
||||
const struct common_firmware_header *hdr;
|
||||
unsigned version_major, version_minor, family_id;
|
||||
int i, r;
|
||||
|
||||
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
|
||||
|
||||
switch (adev->asic_type) {
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
case CHIP_BONAIRE:
|
||||
fw_name = FIRMWARE_BONAIRE;
|
||||
break;
|
||||
case CHIP_KABINI:
|
||||
fw_name = FIRMWARE_KABINI;
|
||||
break;
|
||||
case CHIP_KAVERI:
|
||||
fw_name = FIRMWARE_KAVERI;
|
||||
break;
|
||||
case CHIP_HAWAII:
|
||||
fw_name = FIRMWARE_HAWAII;
|
||||
break;
|
||||
case CHIP_MULLINS:
|
||||
fw_name = FIRMWARE_MULLINS;
|
||||
break;
|
||||
#endif
|
||||
case CHIP_TONGA:
|
||||
fw_name = FIRMWARE_TONGA;
|
||||
break;
|
||||
case CHIP_CARRIZO:
|
||||
fw_name = FIRMWARE_CARRIZO;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
|
||||
fw_name);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_ucode_validate(adev->uvd.fw);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
|
||||
fw_name);
|
||||
release_firmware(adev->uvd.fw);
|
||||
adev->uvd.fw = NULL;
|
||||
return r;
|
||||
}
|
||||
|
||||
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
|
||||
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
|
||||
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
|
||||
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
|
||||
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
|
||||
version_major, version_minor, family_id);
|
||||
|
||||
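	/*
	 * Editorial note, not part of the original commit: ucode_version
	 * packs the UVD version as [31:24] = major, [15:8] = minor and
	 * [7:0] = family id.  A hypothetical header value of 0x01000928
	 * would therefore decode as version 1.9, family 0x28.
	 */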
	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == AMDGPU_MAX_UVD_HANDLES)
		return 0;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);

	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	memcpy(adev->uvd.saved_bo, ptr, size);

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	if (adev->uvd.saved_bo != NULL) {
		memcpy(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct amdgpu_fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			amdgpu_fence_wait(fence, false);
			amdgpu_fence_unref(&fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
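/*
 * Editorial note, not part of the original commit: as a worked example of
 * the H264 DPB sizing above, a 1920x1088 level 4.1 stream gives
 * width_in_mb = 120, height_in_mb = 68 and fs_in_mb = 8160; the level
 * table yields 32768 / 8160 = 4 reference frames, plus one for the
 * current picture = 5 DPB buffers, well under the cap of 17.
 */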
/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	struct fence *f;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg */
		amdgpu_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&adev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
			adev->uvd.filp[i] = ctx->parser->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}
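/*
 * Editorial note, not part of the original commit: both parsing rounds
 * reuse the same packet walker.  Pass 1 only validates buffer placement,
 * which has to happen before any address is final; pass 2 then patches
 * the real 64-bit GPU addresses into the DATA0/DATA1 dwords located by
 * amdgpu_uvd_cs_reg().  Only type-0 (register write) and type-2 (NOP)
 * packets are accepted in a UVD IB.
 */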
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct amdgpu_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_ib_get(ring, NULL, 64, &ib);
	if (r)
		goto err;

	addr = amdgpu_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto err;
	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);
	amdgpu_bo_unref(&bo);
	return 0;

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}
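/*
 * Editorial note, not part of the original commit: UVD messages share a
 * common prefix - msg[0] carries a size/magic value (0x00000de4 here),
 * msg[1] the message type (0 = create, 1 = decode, 2 = destroy) and
 * msg[2] the session handle.  The create message above additionally
 * advertises a 0x780 x 0x440 (1920x1088) maximum decode size.
 */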
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}
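The suspend/resume pair above preserves only the session area that follows
the firmware image inside the pinned BO. A minimal userspace sketch of the
same save arithmetic, with hypothetical names and sizes (not driver code):

#include <stdlib.h>
#include <string.h>

struct fake_vcpu_bo {		/* stands in for the pinned UVD BO */
	unsigned char mem[4096];
};

static void *save_session_area(struct fake_vcpu_bo *bo,
			       size_t ucode_size, size_t *out_size)
{
	/* everything past the firmware image is session state */
	size_t size = sizeof(bo->mem) - ucode_size;
	void *saved = malloc(size);

	if (saved)
		memcpy(saved, bo->mem + ucode_size, size);
	*out_size = size;
	return saved;		/* caller frees after restoring */
}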
@ -0,0 +1,39 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_UVD_H__
#define __AMDGPU_UVD_H__

int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
			     struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);

#endif
@ -0,0 +1,727 @@
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"radeon/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"radeon/carrizo_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev)
{
	unsigned long size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));
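	/*
	 * Editorial note, not part of the original commit: VCE uses wider
	 * fields than UVD - [31:20] = major, [19:8] = minor, [7:0] =
	 * binary id - so a hypothetical header value of 0x03200a02 would
	 * decode as version 50.10, binary id 2.
	 */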
	/* allocate firmware, stack and heap BO */

	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes)) +
	       AMDGPU_VCE_STACK_SIZE + AMDGPU_VCE_HEAP_SIZE;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}
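/*
 * Editorial note, not part of the original commit: every VCE command is
 * framed as a byte length (a multiple of 4, at least 8) followed by a
 * command id, which is exactly what amdgpu_vce_ring_parse_cs() below
 * checks.  The session command above is thus 0x0c bytes: length, id
 * 0x00000001 and the session handle.
 */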
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib ib;
	uint64_t dummy;
	int i, r;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);

	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 *
 * Patch relocation inside command stream with real buffer address
 */
int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	uint32_t handle = 0;
	bool destroy = false;
	int i, r, idx = 0;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			return -EINVAL;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			break;

		case 0x00000002: // task info
		case 0x01000001: // create
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
			if (r)
				return r;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
			if (r)
				return r;
			break;

		case 0x02000001: // destroy
			destroy = true;
			break;

		case 0x05000001: // context buffer
		case 0x05000004: // video bitstream buffer
		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
			if (r)
				return r;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			return -EINVAL;
		}

		idx += len / 4;
	}

	if (destroy) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);

		return 0;
	}

	/* create or encode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");

	return -EINVAL;
}
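/*
 * Editorial note, not part of the original commit: as a worked example of
 * amdgpu_vce_cs_reloc(), if a mapping starts at GPU page 0x100 and the
 * command stream carries virtual address 0x100040, the reloc subtracts
 * 0x100 * 4096 = 0x100000, leaving the 0x40 byte offset inside the BO,
 * then adds the BO's real GPU offset before patching the lo/hi dwords.
 */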
/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence sequence number to
 * @seq: sequence number of the fence
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				bool write64bits)
{
	WARN_ON(write64bits);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *fence = NULL;
	int r;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = amdgpu_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	amdgpu_fence_unref(&fence);
	return r;
}
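/*
 * Editorial note, not part of the original commit: reading the semaphore
 * emit above, the 8-byte-aligned semaphore address appears to be split
 * into two 20-bit fields, bits [22:3] in the first dword and [42:23] in
 * the second; e.g. addr 0x12345678 yields 0x68acf and 0x24.
 */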
@ -0,0 +1,47 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VCE_H__
#define __AMDGPU_VCE_H__

int amdgpu_vce_sw_init(struct amdgpu_device *adev);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				bool write64bits);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);

#endif
File diff suppressed because it is too large
@ -0,0 +1,48 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_BITS_H
#define ATOM_BITS_H

static inline uint8_t get_u8(void *bios, int ptr)
{
	return ((unsigned char *)bios)[ptr];
}
#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
#define CU8(ptr) get_u8(ctx->bios, (ptr))
static inline uint16_t get_u16(void *bios, int ptr)
{
	return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
}
#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
#define CU16(ptr) get_u16(ctx->bios, (ptr))
static inline uint32_t get_u32(void *bios, int ptr)
{
	return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
}
#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
#define CU32(ptr) get_u32(ctx->bios, (ptr))
#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))

#endif
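/*
 * Editorial note, not part of the original commit: these helpers assemble
 * little-endian values byte by byte, so for BIOS bytes 0x78 0x56 0x34 0x12
 * starting at ptr, get_u16() returns 0x5678 and get_u32() 0x12345678.
 */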
@ -0,0 +1,100 @@
|
|||
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_NAMES_H
#define ATOM_NAMES_H

#include "atom.h"

#ifdef ATOM_DEBUG

#define ATOM_OP_NAMES_CNT 123
static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
"DEBUG", "CTB_DS",
};

#define ATOM_TABLE_NAMES_CNT 74
static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
"MemoryDeviceInit", "EnableYUV",
};

#define ATOM_IO_NAMES_CNT 5
static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
"MM", "PLL", "MC", "PCIE", "PCIE PORT",
};

#else

#define ATOM_OP_NAMES_CNT 0
#define ATOM_TABLE_NAMES_CNT 0
#define ATOM_IO_NAMES_CNT 0

#endif

#endif
@ -0,0 +1,42 @@
/*
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Dave Airlie
 */

#ifndef ATOM_TYPES_H
#define ATOM_TYPES_H

/* sync atom types to kernel types */

typedef uint16_t USHORT;
typedef uint32_t ULONG;
typedef uint8_t UCHAR;


#ifndef ATOM_BIG_ENDIAN
#if defined(__BIG_ENDIAN)
#define ATOM_BIG_ENDIAN 1
#else
#define ATOM_BIG_ENDIAN 0
#endif
#endif
#endif
File diff suppressed because it is too large

@ -0,0 +1,159 @@
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#ifndef ATOM_H
#define ATOM_H

#include <linux/types.h>
#include <drm/drmP.h>

#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
#define ATOM_ATI_MAGIC " 761295520"
#define ATOM_ROM_TABLE_PTR 0x48

#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4

#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20

#define ATOM_CMD_INIT 0
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
#define ATOM_CMD_SPDFANCNTL 0x39

#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32

#define ATOM_FWI_DEFSCLK_PTR 8
#define ATOM_FWI_DEFMCLK_PTR 0xC
#define ATOM_FWI_MAXSCLK_PTR 0x24
#define ATOM_FWI_MAXMCLK_PTR 0x28

#define ATOM_CT_SIZE_PTR 0
#define ATOM_CT_WS_PTR 4
#define ATOM_CT_PS_PTR 5
#define ATOM_CT_PS_MASK 0x7F
#define ATOM_CT_CODE_PTR 6

#define ATOM_OP_CNT 123
#define ATOM_OP_EOT 91

#define ATOM_CASE_MAGIC 0x63
#define ATOM_CASE_END 0x5A5A

#define ATOM_ARG_REG 0
#define ATOM_ARG_PS 1
#define ATOM_ARG_WS 2
#define ATOM_ARG_FB 3
#define ATOM_ARG_ID 4
#define ATOM_ARG_IMM 5
#define ATOM_ARG_PLL 6
#define ATOM_ARG_MC 7

#define ATOM_SRC_DWORD 0
#define ATOM_SRC_WORD0 1
#define ATOM_SRC_WORD8 2
#define ATOM_SRC_WORD16 3
#define ATOM_SRC_BYTE0 4
#define ATOM_SRC_BYTE8 5
#define ATOM_SRC_BYTE16 6
#define ATOM_SRC_BYTE24 7

#define ATOM_WS_QUOTIENT 0x40
#define ATOM_WS_REMAINDER 0x41
#define ATOM_WS_DATAPTR 0x42
#define ATOM_WS_SHIFT 0x43
#define ATOM_WS_OR_MASK 0x44
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
#define ATOM_WS_REGPTR 0x48

#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
#define ATOM_IIO_READ 2
#define ATOM_IIO_WRITE 3
#define ATOM_IIO_CLEAR 4
#define ATOM_IIO_SET 5
#define ATOM_IIO_MOVE_INDEX 6
#define ATOM_IIO_MOVE_ATTR 7
#define ATOM_IIO_MOVE_DATA 8
#define ATOM_IIO_END 9

#define ATOM_IO_MM 0
#define ATOM_IO_PCI 1
#define ATOM_IO_SYSIO 2
#define ATOM_IO_IIO 0x80

struct card_info {
	struct drm_device *dev;
	void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
	uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
	void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
	uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
	void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
	uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
	void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
	uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
};

struct atom_context {
	struct card_info *card;
	struct mutex mutex;
	void *bios;
	uint32_t cmd_table, data_table;
	uint16_t *iio;

	uint16_t data_block;
	uint32_t fb_base;
	uint32_t divmul[2];
	uint16_t io_attr;
	uint16_t reg_block;
	uint8_t shift;
	int cs_equal, cs_above;
	int io_mode;
	uint32_t *scratch;
	int scratch_size_bytes;
};

extern int amdgpu_atom_debug;

struct atom_context *amdgpu_atom_parse(struct card_info *, void *);
int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *);
int amdgpu_atom_asic_init(struct atom_context *);
void amdgpu_atom_destroy(struct atom_context *);
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
				   uint8_t *frev, uint8_t *crev, uint16_t *data_start);
bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
				  uint8_t *frev, uint8_t *crev);
int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"

#endif
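
Editor's note: struct card_info is the seam between the ASIC-agnostic bytecode interpreter and the driver; the interpreter never touches hardware directly, it calls back through these pointers whenever an opcode reads or writes register, MC, or PLL space. A self-contained model of that callback pattern (the names and the 16-entry register file below are hypothetical, not the real driver wiring):

#include <stdint.h>
#include <stdio.h>

struct card_info {
	uint32_t regs[16];	/* stand-in for MMIO space */
	void (*reg_write)(struct card_info *, uint32_t, uint32_t);
	uint32_t (*reg_read)(struct card_info *, uint32_t);
};

static void demo_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	info->regs[reg & 0xf] = val;	/* "filled by driver" */
}

static uint32_t demo_reg_read(struct card_info *info, uint32_t reg)
{
	return info->regs[reg & 0xf];
}

int main(void)
{
	struct card_info info = {
		.reg_write = demo_reg_write,
		.reg_read = demo_reg_read,
	};

	/* an interpreter opcode like MOVE_REG boils down to calls like these */
	info.reg_write(&info, 0x3, 0xdeadbeef);
	printf("0x%08x\n", info.reg_read(&info, 0x3));
	return 0;
}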
@ -0,0 +1,807 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_fixed.h>
#include "amdgpu.h"
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
					 struct drm_display_mode *mode,
					 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
	int a1, a2;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = amdgpu_crtc->crtc_id;

	switch (amdgpu_crtc->rmx_type) {
	case RMX_CENTER:
		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
		break;
	case RMX_ASPECT:
		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;

		if (a1 > a2) {
			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
		} else if (a2 > a1) {
			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
		}
		break;
	case RMX_FULL:
	default:
		args.usOverscanRight = cpu_to_le16(amdgpu_crtc->h_border);
		args.usOverscanLeft = cpu_to_le16(amdgpu_crtc->h_border);
		args.usOverscanBottom = cpu_to_le16(amdgpu_crtc->v_border);
		args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
		break;
	}
	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
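
Editor's note: the RMX_ASPECT branch cross-multiplies the two aspect ratios (a1, a2) instead of dividing, so the comparison stays in integer math; the border is then half the leftover span on the longer axis. A quick arithmetic check with hypothetical modes, outside the driver:

#include <stdio.h>

/* same integer math as the RMX_ASPECT branch above */
int main(void)
{
	int mode_h = 1920, mode_v = 1080;	/* hypothetical source mode */
	int adj_h = 1920, adj_v = 1200;		/* hypothetical panel native mode */
	int a1 = mode_v * adj_h;		/* 2073600 */
	int a2 = adj_v * mode_h;		/* 2304000 */

	if (a2 > a1)	/* letterbox: (1200 - 2073600/1920) / 2 = 60 */
		printf("top/bottom border: %d\n", (adj_v - a1 / mode_h) / 2);
	else if (a1 > a2)
		printf("left/right border: %d\n", (adj_h - a2 / mode_v) / 2);
	return 0;
}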

void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	ENABLE_SCALER_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);

	memset(&args, 0, sizeof(args));

	args.ucScaler = amdgpu_crtc->crtc_id;

	switch (amdgpu_crtc->rmx_type) {
	case RMX_FULL:
		args.ucEnable = ATOM_SCALER_EXPANSION;
		break;
	case RMX_CENTER:
		args.ucEnable = ATOM_SCALER_CENTER;
		break;
	case RMX_ASPECT:
		args.ucEnable = ATOM_SCALER_EXPANSION;
		break;
	default:
		args.ucEnable = ATOM_SCALER_DISABLE;
		break;
	}
	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int index =
	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
	ENABLE_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = amdgpu_crtc->crtc_id;
	args.ucEnable = lock;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
	ENABLE_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = amdgpu_crtc->crtc_id;
	args.ucEnable = state;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
	BLANK_CRTC_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));

	args.ucCRTC = amdgpu_crtc->crtc_id;
	args.ucBlanking = state;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;

	memset(&args, 0, sizeof(args));

	args.ucDispPipeId = amdgpu_crtc->crtc_id;
	args.ucEnable = state;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;

	memset(&args, 0, sizeof(args));

	args.ucEnable = ATOM_INIT;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
					 struct drm_display_mode *mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
	u16 misc = 0;

	memset(&args, 0, sizeof(args));
	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (amdgpu_crtc->h_border * 2));
	args.usH_Blanking_Time =
		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (amdgpu_crtc->h_border * 2));
	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (amdgpu_crtc->v_border * 2));
	args.usV_Blanking_Time =
		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (amdgpu_crtc->v_border * 2));
	args.usH_SyncOffset =
		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + amdgpu_crtc->h_border);
	args.usH_SyncWidth =
		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
	args.usV_SyncOffset =
		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + amdgpu_crtc->v_border);
	args.usV_SyncWidth =
		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
	args.ucH_Border = amdgpu_crtc->h_border;
	args.ucV_Border = amdgpu_crtc->v_border;

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		misc |= ATOM_VSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		misc |= ATOM_HSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_CSYNC)
		misc |= ATOM_COMPOSITESYNC;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		misc |= ATOM_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		misc |= ATOM_DOUBLE_CLOCK_MODE;

	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
	args.ucCRTC = amdgpu_crtc->crtc_id;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

union atom_enable_ss {
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};

static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
					    int enable,
					    int pll_id,
					    int crtc_id,
					    struct amdgpu_atom_ss *ss)
{
	unsigned i;
	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
	union atom_enable_ss args;

	if (enable) {
		/* Don't mess with SS if percentage is 0 or external ss.
		 * SS is already disabled previously, and disabling it
		 * again can cause display problems if the pll is already
		 * programmed.
		 */
		if (ss->percentage == 0)
			return;
		if (ss->type & ATOM_EXTERNAL_SS_MASK)
			return;
	} else {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (adev->mode_info.crtcs[i] &&
			    adev->mode_info.crtcs[i]->enabled &&
			    i != crtc_id &&
			    pll_id == adev->mode_info.crtcs[i]->pll_id) {
				/* one other crtc is using this pll don't turn
				 * off spread spectrum as it might turn off
				 * display on active crtc
				 */
				return;
			}
		}
	}

	memset(&args, 0, sizeof(args));

	args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
	args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
	switch (pll_id) {
	case ATOM_PPLL1:
		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
		break;
	case ATOM_PPLL2:
		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
		break;
	case ATOM_DCPLL:
		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
		break;
	case ATOM_PPLL_INVALID:
		return;
	}
	args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
	args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
	args.v3.ucEnable = enable;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

union adjust_pixel_clock {
	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};

static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
					   struct drm_display_mode *mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_encoder *encoder = amdgpu_crtc->encoder;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u32 adjusted_clock = mode->clock;
	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 dp_clock = mode->clock;
	u32 clock = mode->clock;
	int bpc = amdgpu_crtc->bpc;
	bool is_duallink = amdgpu_dig_monitor_is_duallink(encoder, mode->clock);
	union adjust_pixel_clock args;
	u8 frev, crev;
	int index;

	amdgpu_crtc->pll_flags = AMDGPU_PLL_USE_FRAC_FB_DIV;

	if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
		if (connector) {
			struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
			struct amdgpu_connector_atom_dig *dig_connector =
				amdgpu_connector->con_priv;

			dp_clock = dig_connector->dp_clock;
		}
	}

	/* use recommended ref_div for ss */
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
		if (amdgpu_crtc->ss_enabled) {
			if (amdgpu_crtc->ss.refdiv) {
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
				amdgpu_crtc->pll_reference_div = amdgpu_crtc->ss.refdiv;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
			}
		}
	}

	/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
	if (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
		adjusted_clock = mode->clock * 2;
	if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
		amdgpu_crtc->pll_flags |= AMDGPU_PLL_PREFER_CLOSEST_LOWER;
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_crtc->pll_flags |= AMDGPU_PLL_IS_LCD;


	/* adjust pll for deep color modes */
	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
		switch (bpc) {
		case 8:
		default:
			break;
		case 10:
			clock = (clock * 5) / 4;
			break;
		case 12:
			clock = (clock * 3) / 2;
			break;
		case 16:
			clock = clock * 2;
			break;
		}
	}

	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
	 * accordingly based on the encoder/transmitter to work around
	 * special hw requirements.
	 */
	index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
					  &crev))
		return adjusted_clock;

	memset(&args, 0, sizeof(args));

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
		case 2:
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.ucTransmitterID = amdgpu_encoder->encoder_id;
			args.v1.ucEncodeMode = encoder_mode;
			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
				args.v1.ucConfig |=
					ADJUST_DISPLAY_CONFIG_SS_ENABLE;

			amdgpu_atom_execute_table(adev->mode_info.atom_context,
						  index, (uint32_t *)&args);
			adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
			break;
		case 3:
			args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
			args.v3.sInput.ucTransmitterID = amdgpu_encoder->encoder_id;
			args.v3.sInput.ucEncodeMode = encoder_mode;
			args.v3.sInput.ucDispPllConfig = 0;
			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
				args.v3.sInput.ucDispPllConfig |=
					DISPPLL_CONFIG_SS_ENABLE;
			if (ENCODER_MODE_IS_DP(encoder_mode)) {
				args.v3.sInput.ucDispPllConfig |=
					DISPPLL_CONFIG_COHERENT_MODE;
				/* 16200 or 27000 */
				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
				if (dig->coherent_mode)
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_COHERENT_MODE;
				if (is_duallink)
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_DUAL_LINK;
			}
			if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
			    ENCODER_OBJECT_ID_NONE)
				args.v3.sInput.ucExtTransmitterID =
					amdgpu_encoder_get_dp_bridge_encoder_id(encoder);
			else
				args.v3.sInput.ucExtTransmitterID = 0;

			amdgpu_atom_execute_table(adev->mode_info.atom_context,
						  index, (uint32_t *)&args);
			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
			if (args.v3.sOutput.ucRefDiv) {
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
				amdgpu_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
			}
			if (args.v3.sOutput.ucPostDiv) {
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_POST_DIV;
				amdgpu_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return adjusted_clock;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return adjusted_clock;
	}

	return adjusted_clock;
}
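
Editor's note: for HDMI deep color the PLL must run faster than the nominal mode clock because each pixel carries more bits on the TMDS link, hence the 5/4, 3/2, and 2x factors in the bpc switch above. A quick arithmetic check with a hypothetical 148500 kHz (1080p60) mode clock:

#include <stdio.h>

/* same scaling as the HDMI deep-color switch above */
static unsigned int deep_color_clock(unsigned int clock, int bpc)
{
	switch (bpc) {
	case 10: return (clock * 5) / 4;	/* 30bpp: 148500 -> 185625 */
	case 12: return (clock * 3) / 2;	/* 36bpp: 148500 -> 222750 */
	case 16: return clock * 2;		/* 48bpp: 148500 -> 297000 */
	default: return clock;			/* 24bpp: unchanged */
	}
}

int main(void)
{
	printf("%u %u %u\n", deep_color_clock(148500, 10),
	       deep_color_clock(148500, 12), deep_color_clock(148500, 16));
	return 0;
}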

union set_pixel_clock {
	SET_PIXEL_CLOCK_PS_ALLOCATION base;
	PIXEL_CLOCK_PARAMETERS v1;
	PIXEL_CLOCK_PARAMETERS_V2 v2;
	PIXEL_CLOCK_PARAMETERS_V3 v3;
	PIXEL_CLOCK_PARAMETERS_V5 v5;
	PIXEL_CLOCK_PARAMETERS_V6 v6;
};

/* on DCE5, make sure the voltage is high enough to support the
 * required disp clk.
 */
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
					   u32 dispclk)
{
	u8 frev, crev;
	int index;
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
					  &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 5:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v5.ucCRTC = ATOM_CRTC_INVALID;
			args.v5.usPixelClock = cpu_to_le16(dispclk);
			args.v5.ucPpll = ATOM_DCPLL;
			break;
		case 6:
			/* if the default dcpll clock is specified,
			 * SetPixelClock provides the dividers
			 */
			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
			args.v6.ucPpll = ATOM_EXT_PLL1;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}
	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
{
	if (ENCODER_MODE_IS_DP(encoder_mode)) {
		if (pll_id < ATOM_EXT_PLL1)
			return true;
		else
			return false;
	} else {
		return true;
	}
}

void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
				      u32 crtc_id,
				      int pll_id,
				      u32 encoder_mode,
				      u32 encoder_id,
				      u32 clock,
				      u32 ref_div,
				      u32 fb_div,
				      u32 frac_fb_div,
				      u32 post_div,
				      int bpc,
				      bool ss_enabled,
				      struct amdgpu_atom_ss *ss)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u8 frev, crev;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
					  &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			if (clock == ATOM_DISABLE)
				return;
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.usRefDiv = cpu_to_le16(ref_div);
			args.v1.usFbDiv = cpu_to_le16(fb_div);
			args.v1.ucFracFbDiv = frac_fb_div;
			args.v1.ucPostDiv = post_div;
			args.v1.ucPpll = pll_id;
			args.v1.ucCRTC = crtc_id;
			args.v1.ucRefDivSrc = 1;
			break;
		case 2:
			args.v2.usPixelClock = cpu_to_le16(clock / 10);
			args.v2.usRefDiv = cpu_to_le16(ref_div);
			args.v2.usFbDiv = cpu_to_le16(fb_div);
			args.v2.ucFracFbDiv = frac_fb_div;
			args.v2.ucPostDiv = post_div;
			args.v2.ucPpll = pll_id;
			args.v2.ucCRTC = crtc_id;
			args.v2.ucRefDivSrc = 1;
			break;
		case 3:
			args.v3.usPixelClock = cpu_to_le16(clock / 10);
			args.v3.usRefDiv = cpu_to_le16(ref_div);
			args.v3.usFbDiv = cpu_to_le16(fb_div);
			args.v3.ucFracFbDiv = frac_fb_div;
			args.v3.ucPostDiv = post_div;
			args.v3.ucPpll = pll_id;
			if (crtc_id == ATOM_CRTC2)
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
			else
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
			args.v3.ucTransmitterId = encoder_id;
			args.v3.ucEncoderMode = encoder_mode;
			break;
		case 5:
			args.v5.ucCRTC = crtc_id;
			args.v5.usPixelClock = cpu_to_le16(clock / 10);
			args.v5.ucRefDiv = ref_div;
			args.v5.usFbDiv = cpu_to_le16(fb_div);
			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v5.ucPostDiv = post_div;
			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
			    (pll_id < ATOM_EXT_PLL1))
				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
					break;
				case 10:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
					break;
				case 12:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
					break;
				}
			}
			args.v5.ucTransmitterID = encoder_id;
			args.v5.ucEncoderMode = encoder_mode;
			args.v5.ucPpll = pll_id;
			break;
		case 6:
			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
			args.v6.ucRefDiv = ref_div;
			args.v6.usFbDiv = cpu_to_le16(fb_div);
			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v6.ucPostDiv = post_div;
			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
			    (pll_id < ATOM_EXT_PLL1) &&
			    !is_pixel_clock_source_from_pll(encoder_mode, pll_id))
				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
					break;
				case 12:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
					break;
				case 16:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
					break;
				}
			}
			args.v6.ucTransmitterID = encoder_id;
			args.v6.ucEncoderMode = encoder_mode;
			args.v6.ucPpll = pll_id;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
				     struct drm_display_mode *mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder =
		to_amdgpu_encoder(amdgpu_crtc->encoder);
	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

	amdgpu_crtc->bpc = 8;
	amdgpu_crtc->ss_enabled = false;

	if ((amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(amdgpu_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		struct drm_connector *connector =
			amdgpu_get_connector_for_encoder(amdgpu_crtc->encoder);
		struct amdgpu_connector *amdgpu_connector =
			to_amdgpu_connector(connector);
		struct amdgpu_connector_atom_dig *dig_connector =
			amdgpu_connector->con_priv;
		int dp_clock;

		/* Assign mode clock for hdmi deep color max clock limit check */
		amdgpu_connector->pixelclock_for_modeset = mode->clock;
		amdgpu_crtc->bpc = amdgpu_connector_get_monitor_bpc(connector);

		switch (encoder_mode) {
		case ATOM_ENCODER_MODE_DP_MST:
		case ATOM_ENCODER_MODE_DP:
			/* DP/eDP */
			dp_clock = dig_connector->dp_clock / 10;
			amdgpu_crtc->ss_enabled =
				amdgpu_atombios_get_asic_ss_info(adev, &amdgpu_crtc->ss,
								 ASIC_INTERNAL_SS_ON_DP,
								 dp_clock);
			break;
		case ATOM_ENCODER_MODE_LVDS:
			amdgpu_crtc->ss_enabled =
				amdgpu_atombios_get_asic_ss_info(adev,
								 &amdgpu_crtc->ss,
								 dig->lcd_ss_id,
								 mode->clock / 10);
			break;
		case ATOM_ENCODER_MODE_DVI:
			amdgpu_crtc->ss_enabled =
				amdgpu_atombios_get_asic_ss_info(adev,
								 &amdgpu_crtc->ss,
								 ASIC_INTERNAL_SS_ON_TMDS,
								 mode->clock / 10);
			break;
		case ATOM_ENCODER_MODE_HDMI:
			amdgpu_crtc->ss_enabled =
				amdgpu_atombios_get_asic_ss_info(adev,
								 &amdgpu_crtc->ss,
								 ASIC_INTERNAL_SS_ON_HDMI,
								 mode->clock / 10);
			break;
		default:
			break;
		}
	}

	/* adjust pixel clock as needed */
	amdgpu_crtc->adjusted_clock = amdgpu_atombios_crtc_adjust_pll(crtc, mode);

	return 0;
}

void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder =
		to_amdgpu_encoder(amdgpu_crtc->encoder);
	u32 pll_clock = mode->clock;
	u32 clock = mode->clock;
	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
	struct amdgpu_pll *pll;
	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

	/* pass the actual clock to amdgpu_atombios_crtc_program_pll for HDMI */
	if ((encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
	    (amdgpu_crtc->bpc > 8))
		clock = amdgpu_crtc->adjusted_clock;

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
		pll = &adev->clock.ppll[0];
		break;
	case ATOM_PPLL2:
		pll = &adev->clock.ppll[1];
		break;
	case ATOM_PPLL0:
	case ATOM_PPLL_INVALID:
	default:
		pll = &adev->clock.ppll[2];
		break;
	}

	/* update pll params */
	pll->flags = amdgpu_crtc->pll_flags;
	pll->reference_div = amdgpu_crtc->pll_reference_div;
	pll->post_div = amdgpu_crtc->pll_post_div;

	amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
			   &fb_div, &frac_fb_div, &ref_div, &post_div);

	amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
					amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);

	amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
					 encoder_mode, amdgpu_encoder->encoder_id, clock,
					 ref_div, fb_div, frac_fb_div, post_div,
					 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);

	if (amdgpu_crtc->ss_enabled) {
		/* calculate ss amount and step size */
		u32 step_size;
		u32 amount = (((fb_div * 10) + frac_fb_div) *
			      (u32)amdgpu_crtc->ss.percentage) /
			     (100 * (u32)amdgpu_crtc->ss.percentage_divider);
		amdgpu_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
		amdgpu_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
					  ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
		if (amdgpu_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
			step_size = (4 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
				    (125 * 25 * pll->reference_freq / 100);
		else
			step_size = (2 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
				    (125 * 25 * pll->reference_freq / 100);
		amdgpu_crtc->ss.step = step_size;

		amdgpu_atombios_crtc_program_ss(adev, ATOM_ENABLE, amdgpu_crtc->pll_id,
						amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);
	}
}
@ -0,0 +1,58 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __ATOMBIOS_CRTC_H__
#define __ATOMBIOS_CRTC_H__

void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
					 struct drm_display_mode *mode,
					 struct drm_display_mode *adjusted_mode);
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc);
void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock);
void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state);
void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state);
void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state);
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev);
void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
					 struct drm_display_mode *mode);
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
					   u32 dispclk);
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
				      u32 crtc_id,
				      int pll_id,
				      u32 encoder_mode,
				      u32 encoder_id,
				      u32 clock,
				      u32 ref_div,
				      u32 fb_div,
				      u32 frac_fb_div,
				      u32 post_div,
				      int bpc,
				      bool ss_enabled,
				      struct amdgpu_atom_ss *ss);
int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
				     struct drm_display_mode *mode);
void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc,
				  struct drm_display_mode *mode);

#endif
@ -0,0 +1,774 @@
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_atombios.h"
#include <drm/drm_dp_helper.h>

/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE

static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};

/***** amdgpu AUX functions *****/

union aux_channel_transaction {
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};

static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
					     u8 *send, int send_bytes,
					     u8 *recv, int recv_size,
					     u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct amdgpu_device *adev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);

	base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1);

	amdgpu_atombios_copy_swap(base, send, send_bytes, true);

	args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v2.ucDataOutLen = 0;
	args.v2.ucChannelID = chan->rec.i2c_id;
	args.v2.ucDelay = delay / 10;
	args.v2.ucHPD_ID = chan->rec.hpd;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v2.ucReplyStatus;

	/* timeout */
	if (args.v2.ucReplyStatus == 1) {
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		r = -ETIMEDOUT;
		goto done;
	}

	/* flags not zero */
	if (args.v2.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		r = -EIO;
		goto done;
	}

	/* error */
	if (args.v2.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		r = -EIO;
		goto done;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false);

	r = recv_bytes;
done:
	mutex_unlock(&chan->mutex);

	return r;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

static ssize_t
amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct amdgpu_i2c_chan *chan =
		container_of(aux, struct amdgpu_i2c_chan, aux);
	int ret;
	u8 tx_buf[20];
	size_t tx_size;
	u8 ack, delay = 0;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	tx_buf[0] = msg->address & 0xff;
	tx_buf[1] = msg->address >> 8;
	tx_buf[2] = msg->request << 4;
	tx_buf[3] = msg->size ? (msg->size - 1) : 0;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE + msg->size;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
							tx_buf, tx_size, NULL, 0, delay, &ack);
		if (ret >= 0)
			/* Return payload size. */
			ret = msg->size;
		break;
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
							tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret >= 0)
		msg->reply = ack >> 4;

	return ret;
}
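
Editor's note: the four bytes packed into tx_buf form the DP AUX header the ATOM table consumes: the 16 low address bits, the request nibble shifted into the high half of byte 2, and length-1 in the low nibble of byte 3 (with the transfer size OR'd into its high nibble for the table's benefit). A minimal sketch of that packing for a hypothetical one-byte native read of DPCD address 0x000 (DP_DPCD_REV); the DP_AUX_NATIVE_READ value is taken from drm_dp_helper.h:

#include <stdint.h>
#include <stdio.h>

#define DP_AUX_NATIVE_READ 0x9	/* request code from drm_dp_helper.h */

int main(void)
{
	uint8_t tx_buf[4];
	uint32_t address = 0x000;	/* DP_DPCD_REV */
	unsigned size = 1;		/* read one byte */

	tx_buf[0] = address & 0xff;		/* address bits 7:0  -> 0x00 */
	tx_buf[1] = address >> 8;		/* address bits 15:8 -> 0x00 */
	tx_buf[2] = DP_AUX_NATIVE_READ << 4;	/* request nibble    -> 0x90 */
	tx_buf[3] = size - 1;			/* length - 1        -> 0x00 */
	tx_buf[3] |= 4 << 4;			/* tx_size (HEADER_SIZE) in high nibble */

	printf("%02x %02x %02x %02x\n", tx_buf[0], tx_buf[1], tx_buf[2], tx_buf[3]);
	return 0;
}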

void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
	int ret;

	amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
	amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
	amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
	ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
	if (!ret)
		amdgpu_connector->ddc_bus->has_aux = true;

	WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
}

/***** general DP utility functions *****/

#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3

static void amdgpu_atombios_dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
						int lane_count,
						u8 train_set[4])
{
	u8 v = 0;
	u8 p = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
			      lane,
			      voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
			      pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= DP_VOLTAGE_MAX)
		v |= DP_TRAIN_MAX_SWING_REACHED;

	if (p >= DP_PRE_EMPHASIS_MAX)
		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
		      voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
		      pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
	if (bpc == 0)
		return 24;
	else
		return bpc * 3;
}

/* get the max pix clock supported by the link rate and lane num */
static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
						   int lane_num,
						   int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}
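
Editor's note: this is just link bandwidth divided by pixel size: each lane delivers 8 data bits per link-symbol clock tick (after 8b/10b coding), so link_rate (in kHz) * lanes * 8 / bpp yields the pixel clock ceiling in kHz. A quick check with hypothetical link settings:

#include <stdio.h>

/* same bandwidth math as amdgpu_atombios_dp_get_max_dp_pix_clock() */
static int max_dp_pix_clock(int link_rate, int lane_num, int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

int main(void)
{
	/* hypothetical configs: RBR and HBR link rates, 4 lanes, 24 bpp (8 bpc) */
	printf("%d kHz\n", max_dp_pix_clock(162000, 4, 24));	/* 216000 */
	printf("%d kHz\n", max_dp_pix_clock(270000, 4, 24));	/* 360000 */
	return 0;
}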
|
||||
|
||||
/***** amdgpu specific DP functions *****/
|
||||
|
||||
/* First get the min lane# when low rate is used according to pixel clock
|
||||
* (prefer low rate), second check max lane# supported by DP panel,
|
||||
* if the max lane# < low rate lane# then use max lane# instead.
|
||||
*/
|
||||
static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
|
||||
u8 dpcd[DP_DPCD_SIZE],
|
||||
int pix_clock)
|
||||
{
|
||||
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
|
||||
int max_link_rate = drm_dp_max_link_rate(dpcd);
|
||||
int max_lane_num = drm_dp_max_lane_count(dpcd);
|
||||
int lane_num;
|
||||
int max_dp_pix_clock;
|
||||
|
||||
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
|
||||
max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
|
||||
if (pix_clock <= max_dp_pix_clock)
|
||||
break;
|
||||
}
|
||||
|
||||
return lane_num;
|
||||
}
|
||||
|
||||
static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
|
||||
u8 dpcd[DP_DPCD_SIZE],
|
||||
int pix_clock)
|
||||
{
|
||||
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
|
||||
int lane_num, max_pix_clock;
|
||||
|
||||
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
|
||||
ENCODER_OBJECT_ID_NUTMEG)
|
||||
return 270000;
|
||||
|
||||
lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 162000;
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 270000;
|
||||
if (amdgpu_connector_is_dp12_capable(connector)) {
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 540000;
|
||||
}
|
||||
|
||||
return drm_dp_max_link_rate(dpcd);
|
||||
}

static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
					     int action, int dp_clock,
					     u8 ucconfig, u8 lane_num)
{
	DP_ENCODER_SERVICE_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);

	memset(&args, 0, sizeof(args));
	args.ucLinkClock = dp_clock / 10;
	args.ucConfig = ucconfig;
	args.ucAction = action;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return args.ucStatus;
}

u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
	struct drm_device *dev = amdgpu_connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;

	return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
						  amdgpu_connector->ddc_bus->rec.i2c_id, 0);
}

static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 buf[3];

	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}

int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 msg[DP_DPCD_SIZE];
	int ret, i;

	ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
			       DP_DPCD_SIZE);
	if (ret > 0) {
		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
		DRM_DEBUG_KMS("DPCD: ");
		for (i = 0; i < DP_DPCD_SIZE; i++)
			DRM_DEBUG_KMS("%02x ", msg[i]);
		DRM_DEBUG_KMS("\n");

		amdgpu_atombios_dp_probe_oui(amdgpu_connector);

		return 0;
	}
	dig_connector->dpcd[0] = 0;
	return -EINVAL;
}
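
/* Note (added comment, not in the original source): dpcd[0] caches the
 * DPCD_REV byte.  Zeroing it on a failed read makes later revision checks,
 * such as the dpcd[0] >= 0x11 test in the rx power helper below, fail safe.
 */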

int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
				      struct drm_connector *connector)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
	u8 tmp;

	if (!amdgpu_connector->con_priv)
		return panel_mode;

	dig_connector = amdgpu_connector->con_priv;

	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
		/* DP bridge chips */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
			else
				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
		}
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		/* eDP */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
		}
	}

	return panel_mode;
}

void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
					const struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;

	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
		dig_connector->dp_clock =
			amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
		dig_connector->dp_lane_count =
			amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
	}
}

int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
					 struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	int dp_clock;

	if (!amdgpu_connector->con_priv)
		return MODE_CLOCK_HIGH;
	dig_connector = amdgpu_connector->con_priv;

	dp_clock =
		amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);

	if ((dp_clock == 540000) &&
	    (!amdgpu_connector_is_dp12_capable(connector)))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;

	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
	    <= 0)
		return false;
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
		return false;
	return true;
}

void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
					   u8 power_state)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;

	if (!amdgpu_connector->con_priv)
		return;

	dig_connector = amdgpu_connector->con_priv;

	/* power up/down the sink */
	if (dig_connector->dpcd[0] >= 0x11) {
		drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux,
				   DP_SET_POWER, power_state);
		usleep_range(1000, 2000);
	}
}
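
/* Note (added comment, an assumption about the DP spec, not from the
 * original source): DP_SET_POWER is only defined for sinks reporting DPCD
 * revision 1.1 or newer, hence the dpcd[0] >= 0x11 guard above; the short
 * sleep gives the sink time to change power state.
 */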

struct amdgpu_atombios_dp_link_train_info {
	struct amdgpu_device *adev;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int dp_clock;
	int dp_lane_count;
	bool tp3_supported;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 train_set[4];
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 tries;
	struct drm_dp_aux *aux;
};

static void
amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	/* set the initial vs/emph on the source */
	amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder,
						      ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
						      0, dp_info->train_set[0]); /* sets all lanes at once */

	/* set the vs/emph on the sink */
	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
			  dp_info->train_set, dp_info->dp_lane_count);
}

static void
amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp)
{
	int rtp = 0;

	/* set training pattern on the source */
	switch (tp) {
	case DP_TRAINING_PATTERN_1:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
		break;
	case DP_TRAINING_PATTERN_2:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
		break;
	case DP_TRAINING_PATTERN_3:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
		break;
	}
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0);

	/* enable training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
}

static int
amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u8 tmp;

	/* power up the sink */
	amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, 0);

	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
						  ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return 0;
}
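
/* Note (added comment, not in the original source): dpcd[3] is the
 * MAX_DOWNSPREAD capability byte, so bit 0 tested above advertises sink
 * support for 0.5% downspreading of the link clock.
 */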

static int
amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	udelay(400);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
						  ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);

	return 0;
}

static int
amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool clock_recovery;
	u8 voltage;
	int i;

	amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
	memset(dp_info->train_set, 0, 4);
	amdgpu_atombios_dp_update_vs_emph(dp_info);

	udelay(400);

	/* clock recovery loop */
	clock_recovery = false;
	dp_info->tries = 0;
	voltage = 0xff;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dp_info->dp_lane_count; i++) {
			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dp_info->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++dp_info->tries;
			if (dp_info->tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			dp_info->tries = 0;

		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
						    dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
	}
	if (!clock_recovery) {
		DRM_ERROR("clock recovery failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			      dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			      (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			      DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}
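
/* Summary (added comment, not in the original source): the loop above exits
 * on one of four conditions: an AUX read failure, all lanes reporting clock
 * recovery done, every lane already at maximum voltage swing, or five
 * consecutive iterations at the same swing level without progress.
 */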

static int
amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool channel_eq;

	if (dp_info->tp3_supported)
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
	else
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);

	/* channel equalization loop */
	dp_info->tries = 0;
	channel_eq = false;
	while (1) {
		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (dp_info->tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
						    dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
		dp_info->tries++;
	}

	if (!channel_eq) {
		DRM_ERROR("channel eq failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
			      dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			      (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			      >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}

void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
				   struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;
	struct amdgpu_connector *amdgpu_connector;
	struct amdgpu_connector_atom_dig *dig_connector;
	struct amdgpu_atombios_dp_link_train_info dp_info;
	u8 tmp;

	if (!amdgpu_encoder->enc_priv)
		return;
	dig = amdgpu_encoder->enc_priv;

	amdgpu_connector = to_amdgpu_connector(connector);
	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
		return;

	if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
	    == 1) {
		if (tmp & DP_TPS3_SUPPORTED)
			dp_info.tp3_supported = true;
		else
			dp_info.tp3_supported = false;
	} else {
		dp_info.tp3_supported = false;
	}

	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
	dp_info.adev = adev;
	dp_info.encoder = encoder;
	dp_info.connector = connector;
	dp_info.dp_lane_count = dig_connector->dp_lane_count;
	dp_info.dp_clock = dig_connector->dp_clock;
	dp_info.aux = &amdgpu_connector->ddc_bus->aux;

	if (amdgpu_atombios_dp_link_train_init(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_cr(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_ce(&dp_info))
		goto done;
done:
	if (amdgpu_atombios_dp_link_train_finish(&dp_info))
		return;
}

@@ -0,0 +1,42 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __ATOMBIOS_DP_H__
#define __ATOMBIOS_DP_H__

void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector);
u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector);
int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector);
int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
				      struct drm_connector *connector);
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
					const struct drm_display_mode *mode);
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
					 struct drm_display_mode *mode);
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
					   u8 power_state);
void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
				   struct drm_connector *connector);

#endif

File diff suppressed because it is too large

@@ -0,0 +1,73 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __ATOMBIOS_ENCODER_H__
#define __ATOMBIOS_ENCODER_H__

u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
void
amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
					    u8 level);
void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
					    struct drm_connector *drm_connector);
void
amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder);
bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder);
bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode);
int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder);
void
amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
					  int action, int panel_mode);
void
amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
					      uint8_t lane_num, uint8_t lane_set);
bool
amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
					    int action);
void
amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode);
void
amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder);
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev);
enum drm_connector_status
amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
				   struct drm_connector *connector);
enum drm_connector_status
amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
				   struct drm_connector *connector);
void
amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder);
void
amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
					      struct drm_encoder *encoder,
					      bool connected);
struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder);
struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder);

#endif

@@ -0,0 +1,158 @@
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 *
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_atombios.h"

#define TARGET_HW_I2C_CLOCK 50

/* these are a limitation of ProcessI2cChannelTransaction not the hw */
#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_READ  255

static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
					      u8 slave_addr, u8 flags,
					      u8 *buf, u8 num)
{
	struct drm_device *dev = chan->dev;
	struct amdgpu_device *adev = dev->dev_private;
	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
	unsigned char *base;
	u16 out = cpu_to_le16(0);
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);

	base = (unsigned char *)adev->mode_info.atom_context->scratch;

	if (flags & HW_I2C_WRITE) {
		if (num > ATOM_MAX_HW_I2C_WRITE) {
			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
			r = -EINVAL;
			goto done;
		}
		if (buf == NULL)
			args.ucRegIndex = 0;
		else
			args.ucRegIndex = buf[0];
		if (num)
			num--;
		if (num)
			memcpy(&out, &buf[1], num);
		args.lpI2CDataOut = cpu_to_le16(out);
	} else {
		if (num > ATOM_MAX_HW_I2C_READ) {
			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
			r = -EINVAL;
			goto done;
		}
		args.ucRegIndex = 0;
		args.lpI2CDataOut = 0;
	}

	args.ucFlag = flags;
	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
	args.ucTransBytes = num;
	args.ucSlaveAddr = slave_addr << 1;
	args.ucLineNumber = chan->rec.i2c_id;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	/* error */
	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
		DRM_DEBUG_KMS("hw_i2c error\n");
		r = -EIO;
		goto done;
	}

	if (!(flags & HW_I2C_WRITE))
		amdgpu_atombios_copy_swap(buf, base, num, false);

done:
	mutex_unlock(&chan->mutex);

	return r;
}
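
/* Note (added comment, not in the original source): for writes, buf[0] is
 * consumed as the register index and only the remaining bytes travel as
 * data, so the 3-byte ceiling leaves at most 2 data bytes per transaction,
 * which is also why they fit in the u16 "out" staging variable.
 */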

int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
			     struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
	struct i2c_msg *p;
	int i, remaining, current_count, buffer_offset, max_bytes, ret;
	u8 flags;

	/* check for bus probe */
	p = &msgs[0];
	if ((num == 1) && (p->len == 0)) {
		ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
							 p->addr, HW_I2C_WRITE,
							 NULL, 0);
		if (ret)
			return ret;
		else
			return num;
	}

	for (i = 0; i < num; i++) {
		p = &msgs[i];
		remaining = p->len;
		buffer_offset = 0;
		/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
		if (p->flags & I2C_M_RD) {
			max_bytes = ATOM_MAX_HW_I2C_READ;
			flags = HW_I2C_READ;
		} else {
			max_bytes = ATOM_MAX_HW_I2C_WRITE;
			flags = HW_I2C_WRITE;
		}
		while (remaining) {
			if (remaining > max_bytes)
				current_count = max_bytes;
			else
				current_count = remaining;
			ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
								 p->addr, flags,
								 &p->buf[buffer_offset], current_count);
			if (ret)
				return ret;
			remaining -= current_count;
			buffer_offset += current_count;
		}
	}

	return num;
}
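
/* Illustrative example (not from the original source): a 300-byte I2C read
 * is split by the loop above into one 255-byte chunk followed by one
 * 45-byte chunk, each issued as its own ProcessI2cChannelTransaction call.
 */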

u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

@@ -0,0 +1,31 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __ATOMBIOS_I2C_H__
#define __ATOMBIOS_I2C_H__

int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
			     struct i2c_msg *msgs, int num);
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);

#endif

@@ -0,0 +1,550 @@
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#ifndef CIK_H
#define CIK_H

#define MC_SEQ_MISC0__GDDR5__SHIFT	0x1c
#define MC_SEQ_MISC0__GDDR5_MASK	0xf0000000
#define MC_SEQ_MISC0__GDDR5_VALUE	5

#define CP_ME_TABLE_SIZE	96

/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define CRTC0_REGISTER_OFFSET	(0x1b7c - 0x1b7c)
#define CRTC1_REGISTER_OFFSET	(0x1e7c - 0x1b7c)
#define CRTC2_REGISTER_OFFSET	(0x417c - 0x1b7c)
#define CRTC3_REGISTER_OFFSET	(0x447c - 0x1b7c)
#define CRTC4_REGISTER_OFFSET	(0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET	(0x4a7c - 0x1b7c)

#define BONAIRE_GB_ADDR_CONFIG_GOLDEN	0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN	0x12011003

#define CIK_RB_BITMAP_WIDTH_PER_SH	2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH	4

#define AMDGPU_NUM_OF_VMIDS	8

#define PIPEID(x)	((x) << 0)
#define MEID(x)		((x) << 2)
#define VMID(x)		((x) << 4)
#define QUEUEID(x)	((x) << 8)

#define mmCC_DRM_ID_STRAPS	0x1559
#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK	0xf0000000

#define mmCHUB_CONTROL	0x619
#define BYPASS_VM	(1 << 0)

#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)

#define mmGRPH_LUT_10BIT_BYPASS_CONTROL	0x1a02
#define LUT_10BIT_BYPASS_EN	(1 << 8)

# define CURSOR_MONO		0
# define CURSOR_24_1		1
# define CURSOR_24_8_PRE_MULT	2
# define CURSOR_24_8_UNPRE_MULT	3
# define CURSOR_URGENT_ALWAYS	0
# define CURSOR_URGENT_1_8	1
# define CURSOR_URGENT_1_4	2
# define CURSOR_URGENT_3_8	3
# define CURSOR_URGENT_1_2	4

# define GRPH_DEPTH_8BPP	0
# define GRPH_DEPTH_16BPP	1
# define GRPH_DEPTH_32BPP	2
/* 8 BPP */
# define GRPH_FORMAT_INDEXED	0
/* 16 BPP */
# define GRPH_FORMAT_ARGB1555	0
# define GRPH_FORMAT_ARGB565	1
# define GRPH_FORMAT_ARGB4444	2
# define GRPH_FORMAT_AI88	3
# define GRPH_FORMAT_MONO16	4
# define GRPH_FORMAT_BGRA5551	5
/* 32 BPP */
# define GRPH_FORMAT_ARGB8888		0
# define GRPH_FORMAT_ARGB2101010	1
# define GRPH_FORMAT_32BPP_DIG		2
# define GRPH_FORMAT_8B_ARGB2101010	3
# define GRPH_FORMAT_BGRA1010102	4
# define GRPH_FORMAT_8B_BGRA1010102	5
# define GRPH_FORMAT_RGB111110		6
# define GRPH_FORMAT_BGR101111		7
# define ADDR_SURF_MACRO_TILE_ASPECT_1	0
# define ADDR_SURF_MACRO_TILE_ASPECT_2	1
# define ADDR_SURF_MACRO_TILE_ASPECT_4	2
# define ADDR_SURF_MACRO_TILE_ASPECT_8	3
# define GRPH_ARRAY_LINEAR_GENERAL	0
# define GRPH_ARRAY_LINEAR_ALIGNED	1
# define GRPH_ARRAY_1D_TILED_THIN1	2
# define GRPH_ARRAY_2D_TILED_THIN1	4
# define DISPLAY_MICRO_TILING		0
# define THIN_MICRO_TILING		1
# define DEPTH_MICRO_TILING		2
# define ROTATED_MICRO_TILING		4
# define GRPH_ENDIAN_NONE		0
# define GRPH_ENDIAN_8IN16		1
# define GRPH_ENDIAN_8IN32		2
# define GRPH_ENDIAN_8IN64		3
# define GRPH_RED_SEL_R			0
# define GRPH_RED_SEL_G			1
# define GRPH_RED_SEL_B			2
# define GRPH_RED_SEL_A			3
# define GRPH_GREEN_SEL_G		0
# define GRPH_GREEN_SEL_B		1
# define GRPH_GREEN_SEL_A		2
# define GRPH_GREEN_SEL_R		3
# define GRPH_BLUE_SEL_B		0
# define GRPH_BLUE_SEL_A		1
# define GRPH_BLUE_SEL_R		2
# define GRPH_BLUE_SEL_G		3
# define GRPH_ALPHA_SEL_A		0
# define GRPH_ALPHA_SEL_R		1
# define GRPH_ALPHA_SEL_G		2
# define GRPH_ALPHA_SEL_B		3
# define INPUT_GAMMA_USE_LUT		0
# define INPUT_GAMMA_BYPASS		1
# define INPUT_GAMMA_SRGB_24		2
# define INPUT_GAMMA_XVYCC_222		3

# define INPUT_CSC_BYPASS		0
# define INPUT_CSC_PROG_COEFF		1
# define INPUT_CSC_PROG_SHARED_MATRIXA	2

# define OUTPUT_CSC_BYPASS		0
# define OUTPUT_CSC_TV_RGB		1
# define OUTPUT_CSC_YCBCR_601		2
# define OUTPUT_CSC_YCBCR_709		3
# define OUTPUT_CSC_PROG_COEFF		4
# define OUTPUT_CSC_PROG_SHARED_MATRIXB	5

# define DEGAMMA_BYPASS			0
# define DEGAMMA_SRGB_24		1
# define DEGAMMA_XVYCC_222		2
# define GAMUT_REMAP_BYPASS		0
# define GAMUT_REMAP_PROG_COEFF		1
# define GAMUT_REMAP_PROG_SHARED_MATRIXA	2
# define GAMUT_REMAP_PROG_SHARED_MATRIXB	3

# define REGAMMA_BYPASS			0
# define REGAMMA_SRGB_24		1
# define REGAMMA_XVYCC_222		2
# define REGAMMA_PROG_A			3
# define REGAMMA_PROG_B			4

# define FMT_CLAMP_6BPC			0
# define FMT_CLAMP_8BPC			1
# define FMT_CLAMP_10BPC		2

# define HDMI_24BIT_DEEP_COLOR		0
# define HDMI_30BIT_DEEP_COLOR		1
# define HDMI_36BIT_DEEP_COLOR		2
# define HDMI_ACR_HW			0
# define HDMI_ACR_32			1
# define HDMI_ACR_44			2
# define HDMI_ACR_48			3
# define HDMI_ACR_X1			1
# define HDMI_ACR_X2			2
# define HDMI_ACR_X4			4
# define AFMT_AVI_INFO_Y_RGB		0
# define AFMT_AVI_INFO_Y_YCBCR422	1
# define AFMT_AVI_INFO_Y_YCBCR444	2

#define NO_AUTO				0
#define ES_AUTO				1
#define GS_AUTO				2
#define ES_AND_GS_AUTO			3

# define ARRAY_MODE(x)			((x) << 2)
# define PIPE_CONFIG(x)			((x) << 6)
# define TILE_SPLIT(x)			((x) << 11)
# define MICRO_TILE_MODE_NEW(x)		((x) << 22)
# define SAMPLE_SPLIT(x)		((x) << 25)
# define BANK_WIDTH(x)			((x) << 0)
# define BANK_HEIGHT(x)			((x) << 2)
# define MACRO_TILE_ASPECT(x)		((x) << 4)
# define NUM_BANKS(x)			((x) << 6)

#define MSG_ENTER_RLC_SAFE_MODE		1
#define MSG_EXIT_RLC_SAFE_MODE		0

/*
 * PM4
 */
#define PACKET_TYPE0	0
#define PACKET_TYPE1	1
#define PACKET_TYPE2	2
#define PACKET_TYPE3	3

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |	\
			 ((reg) & 0xFFFF) |	\
			 ((n) & 0x3FFF) << 16)
#define CP_PACKET2	0x80000000
#define PACKET2_PAD_SHIFT	0
#define PACKET2_PAD_MASK	(0x3fffffff << 0)

#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))

#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |	\
			 (((op) & 0xFF) << 8) |	\
			 ((n) & 0x3FFF) << 16)

#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
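
/* Worked example (illustrative, not from the original source): a type-3
 * NOP header with no payload encodes as PACKET3(PACKET3_NOP, 0)
 * = (3 << 30) | (0x10 << 8) = 0xC0001000.
 */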

/* Packet 3 types */
#define	PACKET3_NOP					0x10
#define	PACKET3_SET_BASE				0x11
#define		PACKET3_BASE_INDEX(x)			((x) << 0)
#define			CE_PARTITION_BASE		3
#define	PACKET3_CLEAR_STATE				0x12
#define	PACKET3_INDEX_BUFFER_SIZE			0x13
#define	PACKET3_DISPATCH_DIRECT				0x15
#define	PACKET3_DISPATCH_INDIRECT			0x16
#define	PACKET3_ATOMIC_GDS				0x1D
#define	PACKET3_ATOMIC_MEM				0x1E
#define	PACKET3_OCCLUSION_QUERY				0x1F
#define	PACKET3_SET_PREDICATION				0x20
#define	PACKET3_REG_RMW					0x21
#define	PACKET3_COND_EXEC				0x22
#define	PACKET3_PRED_EXEC				0x23
#define	PACKET3_DRAW_INDIRECT				0x24
#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
#define	PACKET3_INDEX_BASE				0x26
#define	PACKET3_DRAW_INDEX_2				0x27
#define	PACKET3_CONTEXT_CONTROL				0x28
#define	PACKET3_INDEX_TYPE				0x2A
#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
#define	PACKET3_DRAW_INDEX_AUTO				0x2D
#define	PACKET3_NUM_INSTANCES				0x2F
#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
#define	PACKET3_INDIRECT_BUFFER_CONST			0x33
#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
#define	PACKET3_DRAW_PREAMBLE				0x36
#define	PACKET3_WRITE_DATA				0x37
#define		WRITE_DATA_DST_SEL(x)			((x) << 8)
		/* 0 - register
		 * 1 - memory (sync - via GRBM)
		 * 2 - gl2
		 * 3 - gds
		 * 4 - reserved
		 * 5 - memory (async - direct)
		 */
#define		WR_ONE_ADDR				(1 << 16)
#define		WR_CONFIRM				(1 << 20)
#define		WRITE_DATA_CACHE_POLICY(x)		((x) << 25)
		/* 0 - LRU
		 * 1 - Stream
		 */
#define		WRITE_DATA_ENGINE_SEL(x)		((x) << 30)
		/* 0 - me
		 * 1 - pfp
		 * 2 - ce
		 */
#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
#define	PACKET3_MEM_SEMAPHORE				0x39
#              define PACKET3_SEM_USE_MAILBOX       (0x1 << 16)
#              define PACKET3_SEM_SEL_SIGNAL_TYPE   (0x1 << 20) /* 0 = increment, 1 = write 1 */
#              define PACKET3_SEM_CLIENT_CODE	    ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
#define	PACKET3_COPY_DW					0x3B
#define	PACKET3_WAIT_REG_MEM				0x3C
#define		WAIT_REG_MEM_FUNCTION(x)		((x) << 0)
		/* 0 - always
		 * 1 - <
		 * 2 - <=
		 * 3 - ==
		 * 4 - !=
		 * 5 - >=
		 * 6 - >
		 */
#define		WAIT_REG_MEM_MEM_SPACE(x)		((x) << 4)
		/* 0 - reg
		 * 1 - mem
		 */
#define		WAIT_REG_MEM_OPERATION(x)		((x) << 6)
		/* 0 - wait_reg_mem
		 * 1 - wr_wait_wr_reg
		 */
#define		WAIT_REG_MEM_ENGINE(x)			((x) << 8)
		/* 0 - me
		 * 1 - pfp
		 */
#define	PACKET3_INDIRECT_BUFFER				0x3F
#define		INDIRECT_BUFFER_TCL2_VOLATILE		(1 << 22)
#define		INDIRECT_BUFFER_VALID			(1 << 23)
#define		INDIRECT_BUFFER_CACHE_POLICY(x)		((x) << 28)
		/* 0 - LRU
		 * 1 - Stream
		 * 2 - Bypass
		 */
#define	PACKET3_COPY_DATA				0x40
#define	PACKET3_PFP_SYNC_ME				0x42
#define	PACKET3_SURFACE_SYNC				0x43
#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
#              define PACKET3_TCL1_VOL_ACTION_ENA  (1 << 15)
#              define PACKET3_TC_VOL_ACTION_ENA    (1 << 16) /* L2 */
#              define PACKET3_TC_WB_ACTION_ENA     (1 << 18) /* L2 */
#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
#              define PACKET3_TC_ACTION_ENA        (1 << 23) /* L2 */
#              define PACKET3_CB_ACTION_ENA        (1 << 25)
#              define PACKET3_DB_ACTION_ENA        (1 << 26)
#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
#              define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
#define	PACKET3_COND_WRITE				0x45
#define	PACKET3_EVENT_WRITE				0x46
#define		EVENT_TYPE(x)                           ((x) << 0)
#define		EVENT_INDEX(x)                          ((x) << 8)
		/* 0 - any non-TS event
		 * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
		 * 2 - SAMPLE_PIPELINESTAT
		 * 3 - SAMPLE_STREAMOUTSTAT*
		 * 4 - *S_PARTIAL_FLUSH
		 * 5 - EOP events
		 * 6 - EOS events
		 */
#define	PACKET3_EVENT_WRITE_EOP				0x47
#define		EOP_TCL1_VOL_ACTION_EN                  (1 << 12)
#define		EOP_TC_VOL_ACTION_EN                    (1 << 13) /* L2 */
#define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
#define		EOP_TCL1_ACTION_EN                      (1 << 16)
#define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
#define		EOP_TCL2_VOLATILE                       (1 << 24)
#define		EOP_CACHE_POLICY(x)                     ((x) << 25)
		/* 0 - LRU
		 * 1 - Stream
		 * 2 - Bypass
		 */
#define		DATA_SEL(x)                             ((x) << 29)
		/* 0 - discard
		 * 1 - send low 32bit data
		 * 2 - send 64bit data
		 * 3 - send 64bit GPU counter value
		 * 4 - send 64bit sys counter value
		 */
#define		INT_SEL(x)                              ((x) << 24)
		/* 0 - none
		 * 1 - interrupt only (DATA_SEL = 0)
		 * 2 - interrupt when data write is confirmed
		 */
#define		DST_SEL(x)                              ((x) << 16)
		/* 0 - MC
		 * 1 - TC/L2
		 */
#define	PACKET3_EVENT_WRITE_EOS				0x48
#define	PACKET3_RELEASE_MEM				0x49
#define	PACKET3_PREAMBLE_CNTL				0x4A
#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
#define	PACKET3_DMA_DATA				0x50
/* 1. header
 * 2. CONTROL
 * 3. SRC_ADDR_LO or DATA [31:0]
 * 4. SRC_ADDR_HI [31:0]
 * 5. DST_ADDR_LO [31:0]
 * 6. DST_ADDR_HI [7:0]
 * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
 */
/* CONTROL */
#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
		/* 0 - ME
		 * 1 - PFP
		 */
#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
		/* 0 - LRU
		 * 1 - Stream
		 * 2 - Bypass
		 */
#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
		/* 0 - DST_ADDR using DAS
		 * 1 - GDS
		 * 3 - DST_ADDR using L2
		 */
#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
		/* 0 - LRU
		 * 1 - Stream
		 * 2 - Bypass
		 */
#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
		/* 0 - SRC_ADDR using SAS
		 * 1 - GDS
		 * 2 - DATA
		 * 3 - SRC_ADDR using L2
		 */
#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
/* COMMAND */
#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
		/* 0 - none
		 * 1 - 8 in 16
		 * 2 - 8 in 32
		 * 3 - 8 in 64
		 */
#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
		/* 0 - none
		 * 1 - 8 in 16
		 * 2 - 8 in 32
		 * 3 - 8 in 64
		 */
#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
		/* 0 - memory
		 * 1 - register
		 */
#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
		/* 0 - memory
		 * 1 - register
		 */
#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
#define	PACKET3_AQUIRE_MEM				0x58
#define	PACKET3_REWIND					0x59
#define	PACKET3_LOAD_UCONFIG_REG			0x5E
#define	PACKET3_LOAD_SH_REG				0x5F
#define	PACKET3_LOAD_CONFIG_REG				0x60
#define	PACKET3_LOAD_CONTEXT_REG			0x61
#define	PACKET3_SET_CONFIG_REG				0x68
#define		PACKET3_SET_CONFIG_REG_START			0x00002000
#define		PACKET3_SET_CONFIG_REG_END			0x00002c00
#define	PACKET3_SET_CONTEXT_REG				0x69
#define		PACKET3_SET_CONTEXT_REG_START			0x0000a000
#define		PACKET3_SET_CONTEXT_REG_END			0x0000a400
#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
#define	PACKET3_SET_SH_REG				0x76
#define		PACKET3_SET_SH_REG_START			0x00002c00
#define		PACKET3_SET_SH_REG_END				0x00003000
#define	PACKET3_SET_SH_REG_OFFSET			0x77
#define	PACKET3_SET_QUEUE_REG				0x78
#define	PACKET3_SET_UCONFIG_REG				0x79
#define		PACKET3_SET_UCONFIG_REG_START			0x0000c000
#define		PACKET3_SET_UCONFIG_REG_END			0x0000c400
#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
#define	PACKET3_SCRATCH_RAM_READ			0x7E
#define	PACKET3_LOAD_CONST_RAM				0x80
#define	PACKET3_WRITE_CONST_RAM				0x81
#define	PACKET3_DUMP_CONST_RAM				0x83
#define	PACKET3_INCREMENT_CE_COUNTER			0x84
#define	PACKET3_INCREMENT_DE_COUNTER			0x85
#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
#define	PACKET3_SWITCH_BUFFER				0x8B

/* SDMA - first instance at 0xd000, second at 0xd800 */
#define SDMA0_REGISTER_OFFSET                             0x0 /* not a register */
#define SDMA1_REGISTER_OFFSET                             0x200 /* not a register */
#define SDMA_MAX_INSTANCE 2

#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
					 (((sub_op) & 0xFF) << 8) |	\
					 (((op) & 0xFF) << 0))
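
/* Worked example (illustrative, not from the original source): using the
 * opcodes defined below, a plain linear copy header is
 * SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)
 * = (0 << 16) | (0 << 8) | 1 = 0x00000001.
 */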
/* sDMA opcodes */
#define	SDMA_OPCODE_NOP					0
#define	SDMA_OPCODE_COPY				1
#       define SDMA_COPY_SUB_OPCODE_LINEAR              0
#       define SDMA_COPY_SUB_OPCODE_TILED               1
#       define SDMA_COPY_SUB_OPCODE_SOA                 3
#       define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW   4
#       define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW    5
#       define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW      6
#define	SDMA_OPCODE_WRITE				2
#       define SDMA_WRITE_SUB_OPCODE_LINEAR             0
#       define SDMA_WRTIE_SUB_OPCODE_TILED              1
#define	SDMA_OPCODE_INDIRECT_BUFFER			4
#define	SDMA_OPCODE_FENCE				5
#define	SDMA_OPCODE_TRAP				6
#define	SDMA_OPCODE_SEMAPHORE				7
#       define SDMA_SEMAPHORE_EXTRA_O                   (1 << 13)
		/* 0 - increment
		 * 1 - write 1
		 */
#       define SDMA_SEMAPHORE_EXTRA_S                   (1 << 14)
		/* 0 - wait
		 * 1 - signal
		 */
#       define SDMA_SEMAPHORE_EXTRA_M                   (1 << 15)
		/* mailbox */
#define	SDMA_OPCODE_POLL_REG_MEM			8
#       define SDMA_POLL_REG_MEM_EXTRA_OP(x)            ((x) << 10)
		/* 0 - wait_reg_mem
		 * 1 - wr_wait_wr_reg
		 */
#       define SDMA_POLL_REG_MEM_EXTRA_FUNC(x)          ((x) << 12)
		/* 0 - always
		 * 1 - <
		 * 2 - <=
		 * 3 - ==
		 * 4 - !=
		 * 5 - >=
		 * 6 - >
		 */
#       define SDMA_POLL_REG_MEM_EXTRA_M                (1 << 15)
		/* 0 = register
		 * 1 = memory
		 */
#define	SDMA_OPCODE_COND_EXEC				9
#define	SDMA_OPCODE_CONSTANT_FILL			11
#       define SDMA_CONSTANT_FILL_EXTRA_SIZE(x)         ((x) << 14)
		/* 0 = byte fill
		 * 2 = DW fill
		 */
#define	SDMA_OPCODE_GENERATE_PTE_PDE			12
#define	SDMA_OPCODE_TIMESTAMP				13
#       define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL      0
#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL      1
#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL     2
#define	SDMA_OPCODE_SRBM_WRITE				14
#       define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x)     ((x) << 12)
		/* byte mask */

#define VCE_CMD_NO_OP		0x00000000
#define VCE_CMD_END		0x00000001
#define VCE_CMD_IB		0x00000002
#define VCE_CMD_FENCE		0x00000003
#define VCE_CMD_TRAP		0x00000004
#define VCE_CMD_IB_AUTO		0x00000005
#define VCE_CMD_SEMAPHORE	0x00000006

#endif