drm fixes for 5.15-rc1
ttm:
- Fix ttm_bo_move_memcpy() when ttm_resource is subclassed.
- Fix ttm deadlock if target BO isn't idle
- ttm build fix
- ttm docs fix

dma-buf:
- config option fixes

fbdev:
- limit resolutions to avoid int overflow

i915:
- stddef change.

amdgpu:
- Misc cleanups, typo fixes
- EEPROM fix
- Add some new PCI IDs
- Scatter/Gather display support for Yellow Carp
- PCIe DPM fix for RKL platforms
- RAS fix

amdkfd:
- SVM fix

vc4:
- static function fix

mgag200:
- fix uninit var

panfrost:
- lock_region fixes

- Make some dma-buf config options depend on DMA_SHARED_BUFFER.
- Handle multiplication overflow of fbdev xres/yres in the core.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmE6/HkACgkQDHTzWXnE
hr4Edw/+PTYtJHSZZbcT/Avcdif1KpEWuBfhq+dd75Tm1SNYXBRe03CqH3d23YnZ
1I9oZ4TG1St3KaFBrlW5BERyFD2RhAAWJ4bMUz+/bBN9Y2u/r1scVR7YKoqkI2jr
li1pYoPVLNYrHqdhmhsl7sKOqDRi/0TNvUY/B8tWyEZhTNiMGD9A8Tyv7WJ+iinT
/mLrR0tCYYrzkvMEVdHt0t8+Bp1nvR/ZSfCS/NavD1CZ4RffENzTnFIhBb1QvCDj
W1bF4D6930iOS/HXmheVzKygJlz9fj+8PS1DnvIyRPJjXH74dcCn+DPDRVTxyYB1
3ZSY0I2yFSK0oorN1jYVraDXGB1R0OtIwbdRWvyztqMxaj+gRrSNbSSEcRGAy4YL
Ipyvd2FyHO1rGxN5CS6FDCkJ/9WxOx1caBF0D3HhZVGxqw/m8qISxS+za8U5lbrT
90KqHnaWbKL4flfUExjpwPKSvPImgLHN4tqC8l0471i4Tku0unBf8H9RkODkreRU
fW9GHYCjzxHMwYT0JSHGohsscCvhIhkRYTYlx3bf/1tr0SfYXPiZEJwrJfNTLkZh
mfm5R+wTL5hGHdDheOldjiGQZsazzxzJv2NK5aAuojVRqJuy3pohiQ72mHP5Wr4M
9zOKlXbgBDSxTJleN7MJKZhNyanFUaZut+1rhTFeQ4RCUcgqpxc=
=R62Q
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2021-09-10' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Just an initial bunch of fixes for the merge window, amdgpu is most of
  them with a few ttm fixes and an fbdev avoid multiply overflow fix.

  core:
  - Make some dma-buf config options depend on DMA_SHARED_BUFFER
  - Handle multiplication overflow of fbdev xres/yres in the core

  ttm:
  - Fix ttm_bo_move_memcpy() when ttm_resource is subclassed
  - Fix ttm deadlock if target BO isn't idle
  - ttm build fix
  - ttm docs fix

  dma-buf:
  - config option fixes

  fbdev:
  - limit resolutions to avoid int overflow

  i915:
  - stddef change.

  amdgpu:
  - Misc cleanups, typo fixes
  - EEPROM fix
  - Add some new PCI IDs
  - Scatter/Gather display support for Yellow Carp
  - PCIe DPM fix for RKL platforms
  - RAS fix

  amdkfd:
  - SVM fix

  vc4:
  - static function fix

  mgag200:
  - fix uninit var

  panfrost:
  - lock_region fixes"

* tag 'drm-next-2021-09-10' of git://anongit.freedesktop.org/drm/drm: (36 commits)
  drm/ttm: Fix a deadlock if the target BO is not idle during swap
  fbmem: don't allow too huge resolutions
  dma-buf: DMABUF_SYSFS_STATS should depend on DMA_SHARED_BUFFER
  dma-buf: DMABUF_DEBUG should depend on DMA_SHARED_BUFFER
  drm/i915: use linux/stddef.h due to "isystem: trim/fixup stdarg.h and other headers"
  dma-buf: DMABUF_MOVE_NOTIFY should depend on DMA_SHARED_BUFFER
  drm/amdkfd: drop process ref count when xnack disable
  drm/amdgpu: enable more pm sysfs under SRIOV 1-VF mode
  drm/amdgpu: fix fdinfo race with process exit
  drm/amdgpu: Fix a deadlock if previous GEM object allocation fails
  drm/amdgpu: stop scheduler when calling hw_fini (v2)
  drm/amdgpu: Clear RAS interrupt status on aldebaran
  drm/amd/display: Initialize lt_settings on instantiation
  drm/amd/display: cleanup idents after a revert
  drm/amd/display: Fix memory leak reported by coverity
  drm/ttm: Fix ttm_bo_move_memcpy() for subclassed struct ttm_resource
  drm/amdgpu/swsmu: fix spelling mistake "minimun" -> "minimum"
  drm/amdgpu: Disable PCIE_DPM on Intel RKL Platform
  drm/amdgpu: show both cmd id and name when psp cmd failed
  drm/amd/display: setup system context for APUs
  ...
commit a668acb8f0
@@ -37,7 +37,7 @@ TTM initialization
This section is outdated.

Drivers wishing to support TTM must pass a filled :c:type:`ttm_bo_driver
<ttm_bo_driver>` structure to ttm_bo_device_init, together with an
<ttm_bo_driver>` structure to ttm_device_init, together with an
initialized global reference to the memory manager. The ttm_bo_driver
structure contains several fields with function pointers for
initializing the TTM, allocating and freeing memory, waiting for command
@@ -42,6 +42,7 @@ config UDMABUF
config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
depends on DMA_SHARED_BUFFER
help
Don't pin buffers if the dynamic DMA-buf interface is available on
both the exporter as well as the importer. This fixes a security

@@ -52,6 +53,7 @@ config DMABUF_MOVE_NOTIFY

config DMABUF_DEBUG
bool "DMA-BUF debug checks"
depends on DMA_SHARED_BUFFER
default y if DMA_API_DEBUG
help
This option enables additional checks for DMA-BUF importers and

@@ -74,7 +76,7 @@ menuconfig DMABUF_HEAPS

menuconfig DMABUF_SYSFS_STATS
bool "DMA-BUF sysfs statistics"
select DMA_SHARED_BUFFER
depends on DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
in location /sys/kernel/dmabuf/buffers.
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/*
* Helper function to query RAS EEPROM address
/**
* amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
* adev: amdgpu_device pointer
* i2c_address: pointer to u8; if not NULL, will contain
* the RAS EEPROM address if the function returns true
*
* @adev: amdgpu_device pointer
*
* Return true if vbios supports ras rom address reporting
* Return true if VBIOS supports RAS EEPROM address reporting,
* else return false. If true and @i2c_address is not NULL,
* will contain the RAS ROM address.
*/
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
u8 *i2c_address)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;

@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
union firmware_info *firmware_info;
u8 frev, crev;

if (i2c_address == NULL)
return false;

*i2c_address = 0;

index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
firmwareinfo);

if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
index, &size, &frev, &crev, &data_offset)) {
index, &size, &frev, &crev,
&data_offset)) {
/* support firmware_info 3.4 + */
if ((frev == 3 && crev >=4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
/* The ras_rom_i2c_slave_addr should ideally
* be a 19-bit EEPROM address, which would be
* used as is by the driver; see top of
* amdgpu_eeprom.c.
*
* When this is the case, 0 is of course a
* valid RAS EEPROM address, in which case,
* we'll drop the first "if (firm...)" and only
* leave the check for the pointer.
*
* The reason this works right now is because
* ras_rom_i2c_slave_addr contains the EEPROM
* device type qualifier 1010b in the top 4
* bits.
*/
if (firmware_info->v34.ras_rom_i2c_slave_addr) {
if (i2c_address)
*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
return true;
}
}
}

if (*i2c_address != 0)
return true;

return false;
}
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RENOIR:
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;

@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},

@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},

/* DIMGREY_CAVEFISH */

@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},

/* Aldebaran */
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
struct amdgpu_bo *root;
int ret;

ret = amdgpu_file_to_fpriv(f, &fpriv);

@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);

ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
root = amdgpu_bo_ref(fpriv->vm.root.bo);
if (!root)
return;

ret = amdgpu_bo_reserve(root, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
amdgpu_bo_unreserve(fpriv->vm.root.bo);
amdgpu_bo_unreserve(root);
amdgpu_bo_unref(&root);

seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
@@ -552,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;

if (!ring->no_scheduler)
drm_sched_stop(&ring->sched, NULL);

/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);

@@ -611,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;

if (!ring->no_scheduler) {
drm_sched_resubmit_jobs(&ring->sched);
drm_sched_start(&ring->sched, true);
}

/* enable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
@@ -341,21 +341,18 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
if (r) {
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
goto retry;
}

if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
}
DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
size, initial_domain, args->in.alignment, r);
if (r && r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
goto retry;
}
return r;

if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
}
DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
size, initial_domain, args->in.alignment, r);
}

if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @mem: the resulting mem object
* @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/

@@ -182,7 +182,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
* @mem: TTM memory object
* @res: TTM memory object
*
* Free the allocated GTT again.
*/

@@ -469,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode (%s) ",
amdgpu_ucode_name(ucode->ucode_id));
DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
DRM_WARN("failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
ret = -EINVAL;
@@ -114,27 +114,24 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
uint8_t ras_rom_i2c_slave_addr;
u8 i2c_addr;

if (!control)
return false;

control->i2c_address = 0;
if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
/* The address given by VBIOS is an 8-bit, wire-format
* address, i.e. the most significant byte.
*
* Normalize it to a 19-bit EEPROM address. Remove the
* device type identifier and make it a 7-bit address;
* then make it a 19-bit EEPROM address. See top of
* amdgpu_eeprom.c.
*/
i2c_addr = (i2c_addr & 0x0F) >> 1;
control->i2c_address = ((u32) i2c_addr) << 16;

if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_slave_addr))
{
switch (ras_rom_i2c_slave_addr) {
case 0xA0:
control->i2c_address = 0;
return true;
case 0xA8:
control->i2c_address = 0x40000;
return true;
default:
dev_warn(adev->dev, "RAS EEPROM I2C slave address %02x not supported",
ras_rom_i2c_slave_addr);
return false;
}
return true;
}

switch (adev->asic_type) {
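As a quick illustration of the arithmetic in the hunk above: masking out the EEPROM device-type qualifier and the R/W bit from the 8-bit wire address, then shifting the remainder into bits 16 and up, yields exactly the values the hard-coded switch in the same hunk uses (0xA0 -> 0x00000, 0xA8 -> 0x40000). A standalone sketch, with the helper name invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented helper mirroring the normalization in __get_eeprom_i2c_addr():
 * strip the 1010b device-type qualifier from the 8-bit wire address, drop
 * the R/W bit, and shift the remainder into the 19-bit EEPROM address.
 */
static uint32_t ras_eeprom_addr(uint8_t wire_addr)
{
	return (uint32_t)((wire_addr & 0x0F) >> 1) << 16;
}

int main(void)
{
	/* 0xA0 -> 0x00000 and 0xA8 -> 0x40000, the same values the
	 * switch statement in the hunk hard-codes. */
	printf("0xA0 -> 0x%05X\n", (unsigned int)ras_eeprom_addr(0xA0));
	printf("0xA8 -> 0x%05X\n", (unsigned int)ras_eeprom_addr(0xA8));
	return 0;
}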
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @mem: the resulting mem object
* @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/

@@ -487,7 +487,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
* @mem: TTM memory object
* @res: TTM memory object
*
* Free the allocated VRAM again.
*/

@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
*
* @adev: amdgpu device pointer
* @mem: TTM memory object
* @res: TTM memory object
* @offset: byte offset from the base of VRAM BO
* @length: number of bytes to export in sg_table
* @dev: the other device
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);

xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;

@@ -37,6 +37,7 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,

IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
};

enum idh_event {
@@ -85,11 +85,14 @@
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2

#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L

#define mmBIF_INTR_CNTL_ALDE 0x0101
#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);

@@ -440,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
*/
uint32_t bif_intr_cntl;

bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
if (adev->asic_type == CHIP_ALDEBARAN)
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
else
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
* vetcor 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);

if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);

}

return 0;

@@ -476,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
*/
uint32_t bif_intr_cntl;

bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
if (adev->asic_type == CHIP_ALDEBARAN)
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
else
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);

if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
* vetcor 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);

if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}

return 0;
@@ -904,14 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
/* Disable BACO support for the specific polaris12 SKU temporarily */
if ((adev->pdev->device == 0x699F) &&
(adev->pdev->revision == 0xC7) &&
(adev->pdev->subsystem_vendor == 0x1028) &&
(adev->pdev->subsystem_device == 0x0039))
return false;
else
return amdgpu_dpm_is_baco_supported(adev);
return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
@@ -2484,7 +2484,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
return -EFAULT;
r = -EFAULT;
goto out;
}
svms = &p->svms;

@@ -1200,7 +1200,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->apu_flags) {
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;

mmhub_read_system_context(adev, &pa_config);
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
{
struct link_training_settings lt_settings;
struct link_training_settings lt_settings = {0};

dp_decide_training_settings(
link,

@@ -1707,7 +1707,7 @@ enum link_training_result dc_link_dp_perform_link_training(
bool skip_video_pattern)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
struct link_training_settings lt_settings;
struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);

@@ -1923,7 +1923,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
struct link_training_settings lt_settings;
struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
vpg = dcn303_vpg_create(ctx, vpg_inst);
afmt = dcn303_afmt_create(ctx, afmt_inst);

if (!enc1 || !vpg || !afmt)
if (!enc1 || !vpg || !afmt) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
return NULL;
}

dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
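The error path above frees all three sub-allocations before returning NULL, which is safe because kfree(NULL) is a no-op. A standalone sketch of that pattern with invented helper names (free() playing the role of kfree()):

#include <stdio.h>
#include <stdlib.h>

/* Invented sub-objects standing in for enc1/vpg/afmt. */
struct part { int dummy; };

static struct part *alloc_part(int ok)
{
	return ok ? calloc(1, sizeof(struct part)) : NULL;
}

/* Returns 0 on success, -1 if any sub-allocation failed. */
static int create_parts(int ok_a, int ok_b, int ok_c)
{
	struct part *a = alloc_part(ok_a);
	struct part *b = alloc_part(ok_b);
	struct part *c = alloc_part(ok_c);

	if (!a || !b || !c) {
		/* free(NULL), like kfree(NULL), is a no-op, so the partial
		 * allocations can be released without tracking which one
		 * actually failed. */
		free(a);
		free(b);
		free(c);
		return -1;
	}

	/* A real constructor would wire a/b/c together here. */
	free(a);
	free(b);
	free(c);
	return 0;
}

int main(void)
{
	printf("%d %d\n", create_parts(1, 1, 1), create_parts(1, 0, 1));
	return 0;
}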
@@ -109,7 +109,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;

if (!dcn31_query_backlight_info(panel_cntl, &cmd))
return 0;
return false;

return cmd.panel_cntl.data.is_backlight_on;
}

@@ -119,7 +119,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;

if (!dcn31_query_backlight_info(panel_cntl, &cmd))
return 0;
return false;

return cmd.panel_cntl.data.is_powered_on;
}

@@ -2641,7 +2641,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
@@ -2005,10 +2005,10 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -1335,6 +1335,30 @@ enum smu_cmn2asic_mapping_type {
#define WORKLOAD_MAP(profile, workload) \
[profile] = {1, (workload)}

/**
* smu_memcpy_trailing - Copy the end of one structure into the middle of another
*
* @dst: Pointer to destination struct
* @first_dst_member: The member name in @dst where the overwrite begins
* @last_dst_member: The member name in @dst where the overwrite ends after
* @src: Pointer to the source struct
* @first_src_member: The member name in @src where the copy begins
*
*/
#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
src, first_src_member) \
({ \
size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
size_t __src_size = sizeof(*(src)) - __src_offset; \
size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
__dst_offset; \
BUILD_BUG_ON(__src_size != __dst_size); \
__builtin_memcpy((u8 *)(dst) + __dst_offset, \
(u8 *)(src) + __src_offset, \
__dst_size); \
})

#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
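The smu_memcpy_trailing() macro added above is what replaces the open-coded sizeof()/offsetof() memcpy calls in the *_ppt.c hunks further down. A standalone sketch of the same computation, with invented struct names and a userspace stand-in for offsetofend():

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's offsetofend() from <linux/stddef.h>. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Invented table layouts, only to show what the macro computes. */
struct fw_table {
	unsigned int header[4];
	unsigned int volt_step;     /* copy starts here in the source */
	unsigned int reserved[8];
};

struct drv_table {
	unsigned int other_state;
	unsigned int volt_step;     /* overwrite starts here ...      */
	unsigned int reserved[8];   /* ... and ends after this member */
	unsigned int trailer;       /* must stay untouched            */
};

int main(void)
{
	struct fw_table fw = { .volt_step = 7, .reserved = { 1, 2, 3 } };
	struct drv_table drv = { .trailer = 0xdead };

	/* Equivalent of smu_memcpy_trailing(&drv, volt_step, reserved,
	 * &fw, volt_step): the sizes are checked (BUILD_BUG_ON in the
	 * kernel), then the trailing bytes of fw are copied into the
	 * middle of drv, leaving drv.trailer alone. */
	size_t src_off = offsetof(struct fw_table, volt_step);
	size_t src_size = sizeof(struct fw_table) - src_off;
	size_t dst_off = offsetof(struct drv_table, volt_step);
	size_t dst_size = offsetofend(struct drv_table, reserved) - dst_off;

	assert(src_size == dst_size);
	memcpy((unsigned char *)&drv + dst_off,
	       (const unsigned char *)&fw + src_off, dst_size);

	printf("volt_step=%u trailer=0x%x\n", drv.volt_step, drv.trailer);
	return 0;
}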
@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#if IS_ENABLED(CONFIG_X86_64)
#include <asm/intel-family.h>
#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"

@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}

static bool intel_core_rkl_chk(void)
{
#if IS_ENABLED(CONFIG_X86_64)
struct cpuinfo_x86 *c = &cpu_data(0);

return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
#else
return false;
#endif
}

static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)

data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
data->pcie_dpm_key_disabled =
intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
@@ -483,10 +483,8 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)

if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 6))
memcpy(&smc_pptable->MaxVoltageStepGfx,
&smc_dpm_table->maxvoltagestepgfx,
sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));

smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
smc_dpm_table, maxvoltagestepgfx);
return 0;
}

@@ -431,16 +431,16 @@ static int navi10_append_powerplay_table(struct smu_context *smu)

switch (smc_dpm_table->table_header.content_revision) {
case 5: /* nv10 and nv14 */
memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
smc_dpm_table, I2cControllers);
break;
case 7: /* nv12 */
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
smc_dpm_table_v4_7, I2cControllers);
break;
default:
dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",

@@ -1869,7 +1869,7 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
"The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;

@@ -426,7 +426,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
"The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;

@@ -409,9 +409,8 @@ static int aldebaran_append_powerplay_table(struct smu_context *smu)

if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 10))
memcpy(&smc_pptable->GfxMaxCurrent,
&smc_dpm_table->GfxMaxCurrent,
sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
smc_dpm_table, GfxMaxCurrent);
return 0;
}

@@ -731,7 +731,7 @@ static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
"The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H

#include <stddef.h>
#include <linux/stddef.h>

struct intel_engine_cs;
struct intel_gt;
@@ -124,6 +124,7 @@ static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clo
unsigned int computed;

m = n = p = s = 0;
delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;

for (testp = 8; testp > 0; testp /= 2) {
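The initializations shown above matter because the loop that follows keeps the candidate with the smallest delta: seeding delta with 0xffffffff guarantees the first computed value is accepted, and zeroing m/n/p/s avoids reading indeterminate values if no candidate ever qualifies. A tiny standalone sketch of the keep-the-best pattern:

#include <stdio.h>

/* Minimal "keep the best candidate" search, mirroring why the PLL code must
 * seed delta with the largest possible value before the loop. */
int main(void)
{
	unsigned int target = 100;
	unsigned int candidates[] = { 137, 93, 108 };
	unsigned int best = 0;
	unsigned int delta = 0xffffffff;   /* worst possible, so any hit wins */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int d = candidates[i] > target ?
				 candidates[i] - target : target - candidates[i];

		if (d < delta) {
			delta = d;
			best = candidates[i];
		}
	}
	printf("best=%u delta=%u\n", best, delta);   /* best=93 delta=7 */
	return 0;
}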
@@ -58,25 +58,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
u64 iova, size_t size)
u64 iova, u64 size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
/*
* fls returns:
* 1 .. 32
*
* 10 + fls(num_pages)
* results in the range (11 .. 42)

/* The size is encoded as ceil(log2) minus(1), which may be calculated
* with fls. The size must be clamped to hardware bounds.
*/

size = round_up(size, PAGE_SIZE);

region_width = 10 + fls(size >> PAGE_SHIFT);
if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
/* not pow2, so must go up to the next pow2 */
region_width += 1;
}
size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
region_width = fls64(size - 1) - 1;
region |= region_width;

/* Lock the region that needs to be updated */

@@ -87,7 +78,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
u64 iova, size_t size, u32 op)
u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;

@@ -104,7 +95,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
u64 iova, size_t size, u32 op)
u64 iova, u64 size, u32 op)
{
int ret;

@@ -121,7 +112,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

@@ -137,7 +128,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

@@ -251,7 +242,7 @@ static size_t get_pgsize(u64 addr, size_t size)

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
u64 iova, size_t size)
u64 iova, u64 size)
{
if (mmu->as < 0)
return;

@@ -316,6 +316,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)

#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)

#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
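The reworked lock_region() above encodes the lock-region size as ceil(log2(size)) - 1 via fls64(), after clamping to AS_LOCK_REGION_MIN_SIZE (32 KiB). A standalone sketch of that encoding, with a portable stand-in for fls64():

#include <stdint.h>
#include <stdio.h>

#define LOCK_REGION_MIN_SIZE (1ULL << 15)   /* mirrors AS_LOCK_REGION_MIN_SIZE */

/* Portable stand-in for the kernel's fls64(): position of the highest set
 * bit, counting from 1, or 0 for an input of 0. */
static unsigned int fls64_sketch(uint64_t x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Width encoding used by lock_region(): ceil(log2(size)) - 1. */
static unsigned int region_width(uint64_t size)
{
	if (size < LOCK_REGION_MIN_SIZE)
		size = LOCK_REGION_MIN_SIZE;
	return fls64_sketch(size - 1) - 1;
}

int main(void)
{
	/* 32 KiB (the minimum) encodes as 14, 64 KiB as 15, and a
	 * non-power-of-two 96 KiB rounds up to the 128 KiB encoding (16). */
	printf("%u %u %u\n", region_width(32 << 10),
	       region_width(64 << 10), region_width(96 << 10));
	return 0;
}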
@@ -1160,9 +1160,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
}

if (bo->deleted) {
ttm_bo_cleanup_refs(bo, false, false, locked);
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
return 0;
return ret == -EBUSY ? -ENOSPC : ret;
}

ttm_bo_del_from_lru(bo);

@@ -1216,7 +1216,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (locked)
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
return ret;
return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *src_mem = bo->resource;
struct ttm_resource_manager *src_man =
ttm_manager_type(bdev, src_mem->mem_type);
struct ttm_resource src_copy = *src_mem;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;

@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}

ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
src_copy = *src_mem;
ttm_bo_move_sync_cleanup(bo, dst_mem);

if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
if (!dst_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
@@ -1462,7 +1462,7 @@ static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.audio_startup = vc4_hdmi_audio_startup,
};

struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
struct fb_event event;
u32 unused;

if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;

@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
if (var->xres < 8 || var->yres < 8)
return -EINVAL;

/* Too huge resolution causes multiplication overflow. */
if (check_mul_overflow(var->xres, var->yres, &unused) ||
check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
return -EINVAL;

ret = info->fbops->fb_check_var(var, info);

if (ret)
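check_mul_overflow(), from the kernel's overflow helpers, stores the product and returns true when it does not fit the result type, which is what lets fb_set_var() reject oversized modes before any driver multiplies xres by yres. A rough userspace sketch built on the same compiler builtin the kernel helper uses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Rough userspace equivalent of the kernel's check_mul_overflow() for u32
 * operands, built on the GCC/Clang builtin that backs the kernel helper. */
static bool mul_overflows_u32(uint32_t a, uint32_t b, uint32_t *res)
{
	return __builtin_mul_overflow(a, b, res);
}

int main(void)
{
	uint32_t unused;

	/* A plausible mode passes the check... */
	printf("1920 * 1080 overflows: %d\n",
	       (int)mul_overflows_u32(1920, 1080, &unused));
	/* ...while a crafted xres/yres pair whose product exceeds 32 bits
	 * is rejected before any driver multiplies the two values. */
	printf("65536 * 65536 overflows: %d\n",
	       (int)mul_overflows_u32(65536, 65536, &unused));
	return 0;
}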
@@ -27,11 +27,12 @@
#ifndef _TTM_TT_H_
#define _TTM_TT_H_

#include <linux/pagemap.h>
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>

struct ttm_bo_device;
struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;