drm/amdkfd: Consolidate and clean up log commands

Consolidate log commands so that dev_info(NULL, "Error...") uses the more
accurate pr_err, and remove the module name from the log (it can be seen
via dynamic debugging with +m) and the function name (it can be seen via
dynamic debugging with +f). We also don't need debug messages saying what
function we're in; developers can add those when needed.
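
For illustration, a minimal sketch of the pattern (the message texts are
taken from the diff below; the +pmf flags and the control file are the
standard dynamic debug interface, not something this patch adds):

	/* Before: module prefix hard-coded in every string */
	pr_debug("kfd: creating queue ioctl\n");
	/* After: plain message; dynamic debug can prepend the module (+m)
	 * and function (+f) names at runtime, e.g.:
	 *   echo 'module amdkfd +pmf' > /sys/kernel/debug/dynamic_debug/control
	 */
	pr_debug("Creating queue ioctl\n");
	/* "In func" messages are dropped entirely; +f already prints
	 * the function name */
	pr_debug("kfd: In func %s\n", __func__);	/* removed */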

Don't print vendor and device ID in error messages. They are typically
the same for all GPUs in a multi-GPU system, so they don't add any value
to the message.

Lastly, remove parentheses around %d, %i and 0x%llX.
According to kernel.org:
"Printing numbers in parentheses (%d) adds no value and should be
avoided."

Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Author: Kent Russell, 2017-08-15 23:00:05 -04:00 (committed by Oded Gabbay)
parent 8eabaf54cf
commit 79775b627d
17 changed files with 158 additions and 236 deletions

View File

@@ -142,12 +142,12 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 				struct kfd_ioctl_create_queue_args *args)
 {
 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
-		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 		return -EINVAL;
 	}
 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
-		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 		return -EINVAL;
 	}
@@ -155,26 +155,26 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->ring_base_address,
 			sizeof(uint64_t)))) {
-		pr_err("kfd: can't access ring base address\n");
+		pr_err("Can't access ring base address\n");
 		return -EFAULT;
 	}
 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
-		pr_err("kfd: ring size must be a power of 2 or 0\n");
+		pr_err("Ring size must be a power of 2 or 0\n");
 		return -EINVAL;
 	}
 	if (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->read_pointer_address,
 			sizeof(uint32_t))) {
-		pr_err("kfd: can't access read pointer\n");
+		pr_err("Can't access read pointer\n");
 		return -EFAULT;
 	}
 	if (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->write_pointer_address,
 			sizeof(uint32_t))) {
-		pr_err("kfd: can't access write pointer\n");
+		pr_err("Can't access write pointer\n");
 		return -EFAULT;
 	}
@@ -182,7 +182,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    !access_ok(VERIFY_WRITE,
 			(const void __user *) args->eop_buffer_address,
 			sizeof(uint32_t))) {
-		pr_debug("kfd: can't access eop buffer");
+		pr_debug("Can't access eop buffer");
 		return -EFAULT;
 	}
@@ -190,7 +190,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    !access_ok(VERIFY_WRITE,
 			(const void __user *) args->ctx_save_restore_address,
 			sizeof(uint32_t))) {
-		pr_debug("kfd: can't access ctx save restore buffer");
+		pr_debug("Can't access ctx save restore buffer");
 		return -EFAULT;
 	}
@@ -219,27 +219,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	else
 		q_properties->format = KFD_QUEUE_FORMAT_PM4;
-	pr_debug("Queue Percentage (%d, %d)\n",
+	pr_debug("Queue Percentage: %d, %d\n",
 			q_properties->queue_percent, args->queue_percentage);
-	pr_debug("Queue Priority (%d, %d)\n",
+	pr_debug("Queue Priority: %d, %d\n",
 			q_properties->priority, args->queue_priority);
-	pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
 			q_properties->queue_address, args->ring_base_address);
-	pr_debug("Queue Size (0x%llX, %u)\n",
+	pr_debug("Queue Size: 0x%llX, %u\n",
 			q_properties->queue_size, args->ring_size);
-	pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
-			(uint64_t) q_properties->read_ptr,
-			(uint64_t) q_properties->write_ptr);
+	pr_debug("Queue r/w Pointers: %p, %p\n",
+			q_properties->read_ptr,
+			q_properties->write_ptr);
-	pr_debug("Queue Format (%d)\n", q_properties->format);
-	pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
-	pr_debug("Queue CTX save arex (0x%llX)\n",
+	pr_debug("Queue Format: %d\n", q_properties->format);
+	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
+	pr_debug("Queue CTX save area: 0x%llX\n",
 			q_properties->ctx_save_restore_area_address);
 	return 0;
@@ -257,16 +257,16 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	memset(&q_properties, 0, sizeof(struct queue_properties));
-	pr_debug("kfd: creating queue ioctl\n");
+	pr_debug("Creating queue ioctl\n");
 	err = set_queue_properties_from_user(&q_properties, args);
 	if (err)
 		return err;
-	pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
+	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL) {
-		pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
+		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 		return -EINVAL;
 	}
@@ -278,7 +278,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 		goto err_bind_process;
 	}
-	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+	pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
 			p->pasid,
 			dev->id);
@@ -296,15 +296,15 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	mutex_unlock(&p->mutex);
-	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
+	pr_debug("Queue id %d was created successfully\n", args->queue_id);
-	pr_debug("ring buffer address == 0x%016llX\n",
+	pr_debug("Ring buffer address == 0x%016llX\n",
 			args->ring_base_address);
-	pr_debug("read ptr address == 0x%016llX\n",
+	pr_debug("Read ptr address == 0x%016llX\n",
 			args->read_pointer_address);
-	pr_debug("write ptr address == 0x%016llX\n",
+	pr_debug("Write ptr address == 0x%016llX\n",
 			args->write_pointer_address);
 	return 0;
@@ -321,7 +321,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
 	int retval;
 	struct kfd_ioctl_destroy_queue_args *args = data;
-	pr_debug("kfd: destroying queue id %d for PASID %d\n",
+	pr_debug("Destroying queue id %d for pasid %d\n",
 			args->queue_id,
 			p->pasid);
@@ -341,12 +341,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	struct queue_properties properties;
 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
-		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 		return -EINVAL;
 	}
 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
-		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 		return -EINVAL;
 	}
@@ -354,12 +354,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	    (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->ring_base_address,
 			sizeof(uint64_t)))) {
-		pr_err("kfd: can't access ring base address\n");
+		pr_err("Can't access ring base address\n");
 		return -EFAULT;
 	}
 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
-		pr_err("kfd: ring size must be a power of 2 or 0\n");
+		pr_err("Ring size must be a power of 2 or 0\n");
 		return -EINVAL;
 	}
@@ -368,7 +368,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	properties.queue_percent = args->queue_percentage;
 	properties.priority = args->queue_priority;
-	pr_debug("kfd: updating queue id %d for PASID %d\n",
+	pr_debug("Updating queue id %d for pasid %d\n",
 			args->queue_id, p->pasid);
 	mutex_lock(&p->mutex);

View File

@@ -78,7 +78,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 			pq_packets_size_in_bytes / sizeof(uint32_t),
 			&ib_packet_buff);
 	if (status != 0) {
-		pr_err("amdkfd: acquire_packet_buffer failed\n");
+		pr_err("acquire_packet_buffer failed\n");
 		return status;
 	}
@@ -116,7 +116,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 			&mem_obj);
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		kq->ops.rollback_packet(kq);
 		return status;
 	}
@@ -194,7 +194,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 			&qid);
 	if (status) {
-		pr_err("amdkfd: Failed to create DIQ\n");
+		pr_err("Failed to create DIQ\n");
 		return status;
 	}
@@ -203,7 +203,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 	kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
 	if (kq == NULL) {
-		pr_err("amdkfd: Error getting DIQ\n");
+		pr_err("Error getting DIQ\n");
 		pqm_destroy_queue(dbgdev->pqm, qid);
 		return -EFAULT;
 	}
@@ -279,7 +279,7 @@ static void dbgdev_address_watch_set_registers(
 }
 static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
				struct dbg_address_watch_info *adw_info)
 {
 	union TCP_WATCH_ADDR_H_BITS addrHi;
 	union TCP_WATCH_ADDR_L_BITS addrLo;
@@ -293,7 +293,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 	pdd = kfd_get_process_device_data(dbgdev->dev,
					adw_info->process);
 	if (!pdd) {
-		pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+		pr_err("Failed to get pdd for wave control no DIQ\n");
 		return -EFAULT;
 	}
@@ -303,13 +303,13 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 	if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
			(adw_info->num_watch_points == 0)) {
-		pr_err("amdkfd: num_watch_points is invalid\n");
+		pr_err("num_watch_points is invalid\n");
 		return -EINVAL;
 	}
 	if ((adw_info->watch_mode == NULL) ||
			(adw_info->watch_address == NULL)) {
-		pr_err("amdkfd: adw_info fields are not valid\n");
+		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
@@ -348,7 +348,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 }
 static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
				struct dbg_address_watch_info *adw_info)
 {
 	struct pm4__set_config_reg *packets_vec;
 	union TCP_WATCH_ADDR_H_BITS addrHi;
@@ -371,20 +371,20 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
 	if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
			(adw_info->num_watch_points == 0)) {
-		pr_err("amdkfd: num_watch_points is invalid\n");
+		pr_err("num_watch_points is invalid\n");
 		return -EINVAL;
 	}
 	if ((NULL == adw_info->watch_mode) ||
			(NULL == adw_info->watch_address)) {
-		pr_err("amdkfd: adw_info fields are not valid\n");
+		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
@@ -491,7 +491,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
				ib_size);
 		if (status != 0) {
-			pr_err("amdkfd: Failed to submit IB to DIQ\n");
+			pr_err("Failed to submit IB to DIQ\n");
 			break;
 		}
 	}
@@ -619,7 +619,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 	status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
			&reg_gfx_index);
 	if (status) {
-		pr_err("amdkfd: Failed to set wave control registers\n");
+		pr_err("Failed to set wave control registers\n");
 		return status;
 	}
@@ -659,7 +659,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
@@ -712,7 +712,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
			ib_size);
 	if (status != 0)
-		pr_err("amdkfd: Failed to submit IB to DIQ\n");
+		pr_err("Failed to submit IB to DIQ\n");
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -735,13 +735,13 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
 	pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
 	if (!pdd) {
-		pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+		pr_err("Failed to get pdd for wave control no DIQ\n");
 		return -EFAULT;
 	}
 	status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
			&reg_gfx_index);
 	if (status) {
-		pr_err("amdkfd: Failed to set wave control registers\n");
+		pr_err("Failed to set wave control registers\n");
 		return status;
 	}
@@ -826,7 +826,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
 	}
 	if (vmid > last_vmid_to_scan) {
-		pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+		pr_err("Didn't find vmid for pasid %d\n", p->pasid);
 		return -EFAULT;
 	}

View File

@@ -71,7 +71,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
 	new_buff = kfd_alloc_struct(new_buff);
 	if (!new_buff) {
-		pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+		pr_err("Failed to allocate dbgmgr instance\n");
 		return false;
 	}
@@ -79,7 +79,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
 	new_buff->dev = pdev;
 	new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
 	if (!new_buff->dbgdev) {
-		pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+		pr_err("Failed to allocate dbgdev instance\n");
 		kfree(new_buff);
 		return false;
 	}

View File

@@ -152,7 +152,7 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 	}
 	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
-		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+		dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
@@ -248,42 +248,33 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
-		dev_err(kfd_device,
-			"Could not allocate %d bytes for device (%x:%x)\n",
-			size, kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
 		goto out;
 	}
-	dev_info(kfd_device,
-		"Allocated %d bytes on gart for device(%x:%x)\n",
-		size, kfd->pdev->vendor, kfd->pdev->device);
+	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
 	/* Initialize GTT sa with 512 byte chunk size */
 	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
-		dev_err(kfd_device,
-			"Error initializing gtt sub-allocator\n");
+		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
 		goto kfd_gtt_sa_init_error;
 	}
 	kfd_doorbell_init(kfd);
 	if (kfd_topology_add_device(kfd) != 0) {
-		dev_err(kfd_device,
-			"Error adding device (%x:%x) to topology\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error adding device to topology\n");
 		goto kfd_topology_add_device_error;
 	}
 	if (kfd_interrupt_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing interrupts for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing interrupts\n");
 		goto kfd_interrupt_error;
 	}
 	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
-			"Error initializing iommuv2 for device (%x:%x)\n",
+			"Error initializing iommuv2 for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
 		goto device_iommu_pasid_error;
 	}
@@ -293,15 +284,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->dqm = device_queue_manager_init(kfd);
 	if (!kfd->dqm) {
-		dev_err(kfd_device,
-			"Error initializing queue manager for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing queue manager\n");
 		goto device_queue_manager_error;
 	}
 	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
 		dev_err(kfd_device,
-			"Error starting queuen manager for device (%x:%x)\n",
+			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
 		goto dqm_start_error;
 	}
@@ -309,10 +298,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->dbgmgr = NULL;
 	kfd->init_complete = true;
-	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		kfd->pdev->device);
-	pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+	pr_debug("Starting kfd with the following scheduling policy %d\n",
		sched_policy);
 	goto out;
@@ -330,7 +319,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 kfd_gtt_sa_init_error:
 	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
-		"device (%x:%x) NOT added due to errors\n",
+		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
 out:
 	return kfd->init_complete;
@@ -422,7 +411,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 	if (!kfd->gtt_sa_bitmap)
 		return -ENOMEM;
-	pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
 	mutex_init(&kfd->gtt_sa_lock);
@@ -468,7 +457,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	if ((*mem_obj) == NULL)
 		return -ENOMEM;
-	pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
 	start_search = 0;
@@ -480,7 +469,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
					kfd->gtt_sa_num_of_chunks,
					start_search);
-	pr_debug("kfd: found = %d\n", found);
+	pr_debug("Found = %d\n", found);
 	/* If there wasn't any free chunk, bail out */
 	if (found == kfd->gtt_sa_num_of_chunks)
@@ -498,12 +487,12 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
					found,
					kfd->gtt_sa_chunk_size);
-	pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
 	/* If we need only one chunk, mark it as allocated and get out */
 	if (size <= kfd->gtt_sa_chunk_size) {
-		pr_debug("kfd: single bit\n");
+		pr_debug("Single bit\n");
 		set_bit(found, kfd->gtt_sa_bitmap);
 		goto kfd_gtt_out;
 	}
@@ -538,7 +527,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	} while (cur_size > 0);
-	pr_debug("kfd: range_start = %d, range_end = %d\n",
+	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);
 	/* Mark the chunks as allocated */
@@ -552,7 +541,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	return 0;
 kfd_gtt_no_free_chunk:
-	pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
 	mutex_unlock(&kfd->gtt_sa_lock);
 	kfree(mem_obj);
 	return -ENOMEM;
@@ -568,7 +557,7 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
 	if (!mem_obj)
 		return 0;
-	pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);
 	mutex_lock(&kfd->gtt_sa_lock);

View File

@@ -121,7 +121,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 	/* Kaveri kfd vmid's starts from vmid 8 */
 	allocated_vmid = bit + KFD_VMID_START_OFFSET;
-	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+	pr_debug("vmid allocation %d\n", allocated_vmid);
 	qpd->vmid = allocated_vmid;
 	q->properties.vmid = allocated_vmid;
@@ -154,13 +154,12 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
-	pr_debug("kfd: In func %s\n", __func__);
 	print_queue(q);
 	mutex_lock(&dqm->lock);
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -240,8 +239,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 	if (!set)
 		return -EBUSY;
-	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
-				__func__, q->pipe, q->queue);
+	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
 	/* horizontal hqd allocation */
 	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
@@ -278,9 +276,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 		return retval;
 	}
-	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
-			q->pipe,
-			q->queue);
+	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+			q->pipe, q->queue);
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
@@ -304,8 +301,6 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	retval = 0;
-	pr_debug("kfd: In Func %s\n", __func__);
 	mutex_lock(&dqm->lock);
 	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -324,7 +319,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 		dqm->sdma_queue_count--;
 		deallocate_sdma_queue(dqm, q->sdma_id);
 	} else {
-		pr_debug("q->properties.type is invalid (%d)\n",
+		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
 		retval = -EINVAL;
 		goto out;
@@ -403,13 +398,13 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
 	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
-	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+	pr_debug("mqd type %d\n", type);
 	mqd = dqm->mqds[type];
 	if (!mqd) {
 		mqd = mqd_manager_init(type, dqm->dev);
 		if (mqd == NULL)
-			pr_err("kfd: mqd manager is NULL");
+			pr_err("mqd manager is NULL");
 		dqm->mqds[type] = mqd;
 	}
@@ -424,8 +419,6 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !qpd);
-	pr_debug("kfd: In func %s\n", __func__);
 	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
 	if (!n)
 		return -ENOMEM;
@@ -452,8 +445,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !qpd);
-	pr_debug("In func %s\n", __func__);
 	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");
@@ -501,25 +492,13 @@ static void init_interrupts(struct device_queue_manager *dqm)
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
 }
-static int init_scheduler(struct device_queue_manager *dqm)
-{
-	int retval = 0;
-	BUG_ON(!dqm);
-	pr_debug("kfd: In %s\n", __func__);
-	return retval;
-}
 static int initialize_nocpsch(struct device_queue_manager *dqm)
 {
 	int pipe, queue;
 	BUG_ON(!dqm);
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -544,7 +523,6 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
 	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
-	init_scheduler(dqm);
 	return 0;
 }
@@ -617,9 +595,9 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
 	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
-	pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
-	pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
-	pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+	pr_debug("SDMA id is: %d\n", q->sdma_id);
+	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
@@ -651,8 +629,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
-	pr_debug("kfd: In func %s\n", __func__);
 	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
 	res.vmid_mask <<= KFD_VMID_START_OFFSET;
@@ -682,9 +658,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 	res.gws_mask = res.oac_mask = res.gds_heap_base =
		res.gds_heap_size = 0;
-	pr_debug("kfd: scheduling resources:\n"
-			" vmid mask: 0x%8X\n"
-			" queue mask: 0x%8llX\n",
+	pr_debug("Scheduling resources:\n"
+			"vmid mask: 0x%8X\n"
+			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);
 	return pm_send_set_resources(&dqm->packets, &res);
@@ -696,8 +672,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -732,7 +707,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	if (retval != 0)
 		goto fail_set_sched_resources;
-	pr_debug("kfd: allocating fence memory\n");
+	pr_debug("Allocating fence memory\n");
 	/* allocate fence memory on the gart */
 	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
@@ -786,11 +761,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq || !qpd);
-	pr_debug("kfd: In func %s\n", __func__);
 	mutex_lock(&dqm->lock);
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -819,8 +792,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq);
-	pr_debug("kfd: In %s\n", __func__);
 	mutex_lock(&dqm->lock);
 	/* here we actually preempt the DIQ */
 	destroy_queues_cpsch(dqm, true, false);
@@ -862,7 +833,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	mutex_lock(&dqm->lock);
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
 		retval = -EPERM;
 		goto out;
@@ -916,7 +887,7 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	while (*fence_addr != fence_value) {
 		if (time_after(jiffies, timeout)) {
-			pr_err("kfd: qcm fence wait loop timeout expired\n");
+			pr_err("qcm fence wait loop timeout expired\n");
 			return -ETIME;
 		}
 		schedule();
@@ -949,7 +920,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->active_runlist)
 		goto out;
-	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);
 	if (dqm->sdma_queue_count > 0) {
@@ -998,7 +969,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	retval = destroy_queues_cpsch(dqm, false, false);
 	if (retval != 0) {
-		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
 	}
@@ -1014,7 +985,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
 	if (retval != 0) {
-		pr_err("kfd: failed to execute runlist");
+		pr_err("failed to execute runlist");
 		goto out;
 	}
 	dqm->active_runlist = true;
@@ -1106,8 +1077,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 {
 	bool retval;
-	pr_debug("kfd: In func %s\n", __func__);
 	mutex_lock(&dqm->lock);
 	if (alternate_aperture_size == 0) {
@@ -1152,7 +1121,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
 		program_sh_mem_settings(dqm, qpd);
-	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);
@@ -1170,7 +1139,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 	BUG_ON(!dev);
-	pr_debug("kfd: loading device queue manager\n");
+	pr_debug("Loading device queue manager\n");
 	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
 	if (!dqm)

View File

@@ -127,7 +127,7 @@ static int register_process_cik(struct device_queue_manager *dqm,
 		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
 	}
-	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
 	return 0;

View File

@@ -139,7 +139,7 @@ static int register_process_vi(struct device_queue_manager *dqm,
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
 	}
-	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
 	return 0;

View File

@@ -97,23 +97,23 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
 	BUG_ON(!kfd->doorbell_kernel_ptr);
-	pr_debug("kfd: doorbell initialization:\n");
-	pr_debug("kfd: doorbell base == 0x%08lX\n",
+	pr_debug("Doorbell initialization:\n");
+	pr_debug("doorbell base == 0x%08lX\n",
			(uintptr_t)kfd->doorbell_base);
-	pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+	pr_debug("doorbell_id_offset == 0x%08lX\n",
			kfd->doorbell_id_offset);
-	pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+	pr_debug("doorbell_process_limit == 0x%08lX\n",
			doorbell_process_limit);
-	pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+	pr_debug("doorbell_kernel_offset == 0x%08lX\n",
			(uintptr_t)kfd->doorbell_base);
-	pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+	pr_debug("doorbell aperture size == 0x%08lX\n",
			kfd->shared_resources.doorbell_aperture_size);
-	pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+	pr_debug("doorbell kernel address == 0x%08lX\n",
			(uintptr_t)kfd->doorbell_kernel_ptr);
 }
@@ -142,12 +142,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	pr_debug("kfd: mapping doorbell page in %s\n"
+	pr_debug("Mapping doorbell page\n"
		" target user address == 0x%08llX\n"
		" physical address == 0x%08llX\n"
		" vm_flags == 0x%04lX\n"
		" size == 0x%04lX\n",
-		__func__,
		(unsigned long long) vma->vm_start, address, vma->vm_flags,
		doorbell_process_allocation());
@@ -185,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 	*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
				sizeof(u32)) + inx;
-	pr_debug("kfd: get kernel queue doorbell\n"
+	pr_debug("Get kernel queue doorbell\n"
			" doorbell offset == 0x%08X\n"
			" kernel address == 0x%08lX\n",
		*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
@@ -210,7 +209,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
 {
 	if (db) {
 		writel(value, db);
-		pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+		pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
 	}
 }

View File

@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process,
 	*out_page = page;
 	*out_slot_index = slot;
-	pr_debug("allocated event signal slot in page %p, slot %d\n",
+	pr_debug("Allocated event signal slot in page %p, slot %d\n",
			page, slot);
 	return true;
@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
					struct signal_page,
					event_pages)->page_index + 1;
-	pr_debug("allocated new event signal page at %p, for process %p\n",
+	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);
-	pr_debug("page index is %d\n", page->page_index);
+	pr_debug("Page index is %d\n", page->page_index);
 	list_add(&page->event_pages, &p->signal_event_pages);
@@ -292,13 +292,13 @@ static int create_signal_event(struct file *devkfd,
				struct kfd_event *ev)
 {
 	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
-		pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
+		pr_warn("Signal event wasn't created because limit was reached\n");
 		return -ENOMEM;
 	}
 	if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
						&ev->signal_slot_index)) {
-		pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
+		pr_warn("Signal event wasn't created because out of kernel memory\n");
 		return -ENOMEM;
 	}
@@ -310,11 +310,7 @@ static int create_signal_event(struct file *devkfd,
 	ev->event_id = make_signal_event_id(ev->signal_page,
						ev->signal_slot_index);
-	pr_debug("signal event number %zu created with id %d, address %p\n",
-			p->signal_event_count, ev->event_id,
-			ev->user_signal_address);
-	pr_debug("signal event number %zu created with id %d, address %p\n",
+	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);
@@ -817,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
 	/* check required size is logical */
 	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
-		pr_err("amdkfd: event page mmap requested illegal size\n");
+		pr_err("Event page mmap requested illegal size\n");
 		return -EINVAL;
 	}
@@ -826,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
 	page = lookup_signal_page_by_index(p, page_index);
 	if (!page) {
 		/* Probably KFD bug, but mmap is user-accessible. */
-		pr_debug("signal page could not be found for page_index %u\n",
+		pr_debug("Signal page could not be found for page_index %u\n",
				page_index);
 		return -EINVAL;
 	}
@@ -837,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		| VM_DONTDUMP | VM_PFNMAP;
-	pr_debug("mapping signal page\n");
+	pr_debug("Mapping signal page\n");
 	pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
 	pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
 	pr_debug(" pfn == 0x%016lX\n", pfn);

View File

@@ -44,8 +44,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	BUG_ON(!kq || !dev);
 	BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
-	pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
-			__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+	pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
+			queue_size);
 	memset(&prop, 0, sizeof(prop));
 	memset(&nop, 0, sizeof(nop));
@@ -73,13 +73,13 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
 	if (prop.doorbell_ptr == NULL) {
-		pr_err("amdkfd: error init doorbell");
+		pr_err("Failed to initialize doorbell");
 		goto err_get_kernel_doorbell;
 	}
 	retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
 	if (retval != 0) {
-		pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
+		pr_err("Failed to init pq queues size %d\n", queue_size);
 		goto err_pq_allocate_vidmem;
 	}
@@ -139,7 +139,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	/* assign HIQ to HQD */
 	if (type == KFD_QUEUE_TYPE_HIQ) {
-		pr_debug("assigning hiq to hqd\n");
+		pr_debug("Assigning hiq to hqd\n");
 		kq->queue->pipe = KFD_CIK_HIQ_PIPE;
 		kq->queue->queue = KFD_CIK_HIQ_QUEUE;
 		kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
@@ -304,7 +304,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	}
 	if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
-		pr_err("amdkfd: failed to init kernel queue\n");
+		pr_err("Failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
 	}
@@ -327,7 +327,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 	BUG_ON(!dev);
-	pr_err("amdkfd: starting kernel queue test\n");
+	pr_err("Starting kernel queue test\n");
 	kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
 	BUG_ON(!kq);
@@ -338,7 +338,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 		buffer[i] = kq->nop_packet;
 	kq->ops.submit_packet(kq);
-	pr_err("amdkfd: ending kernel queue test\n");
+	pr_err("Ending kernel queue test\n");
 }

View File

@@ -91,7 +91,7 @@ static int __init kfd_module_init(void)
 	/* Verify module parameters */
 	if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
		(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
-		pr_err("kfd: sched_policy has invalid value\n");
+		pr_err("sched_policy has invalid value\n");
 		return -1;
 	}
@@ -99,7 +99,7 @@ static int __init kfd_module_init(void)
 	if ((max_num_of_queues_per_device < 1) ||
		(max_num_of_queues_per_device >
			KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
-		pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+		pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}

View File

@@ -46,8 +46,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	BUG_ON(!mm || !q || !mqd);
-	pr_debug("kfd: In func %s\n", __func__);
 	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
					mqd_mem_obj);
@@ -172,8 +170,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
 	BUG_ON(!mm || !q || !mqd);
-	pr_debug("kfd: In func %s\n", __func__);
 	m = get_mqd(mqd);
 	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
@@ -302,8 +298,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 	BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
-	pr_debug("kfd: In func %s\n", __func__);
 	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
					mqd_mem_obj);
@@ -360,8 +354,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
 	BUG_ON(!mm || !q || !mqd);
-	pr_debug("kfd: In func %s\n", __func__);
 	m = get_mqd(mqd);
 	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE |
@@ -414,8 +406,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
 	BUG_ON(!dev);
 	BUG_ON(type >= KFD_MQD_TYPE_MAX);
-	pr_debug("kfd: In func %s\n", __func__);
 	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
 	if (!mqd)
 		return NULL;

View File

@@ -108,8 +108,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
 	BUG_ON(!mm || !q || !mqd);
-	pr_debug("kfd: In func %s\n", __func__);
 	m = get_mqd(mqd);
 	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
@@ -117,7 +115,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
			mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
 	m->cp_hqd_pq_control |=
		ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
-	pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
 	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
@@ -129,7 +127,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
		1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
-	pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);
 	m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
@@ -241,8 +239,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
 	BUG_ON(!dev);
 	BUG_ON(type >= KFD_MQD_TYPE_MAX);
-	pr_debug("kfd: In func %s\n", __func__);
 	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
 	if (!mqd)
 		return NULL;

View File

@@ -67,7 +67,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	*over_subscription = false;
 	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
 		*over_subscription = true;
-		pr_debug("kfd: over subscribed runlist\n");
+		pr_debug("Over subscribed runlist\n");
 	}
 	map_queue_size =
@@ -85,7 +85,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	if (*over_subscription)
 		*rlib_size += sizeof(struct pm4_runlist);
-	pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+	pr_debug("runlist ib size %d\n", *rlib_size);
 }
 static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -106,7 +106,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
					&pm->ib_buffer_obj);
 	if (retval != 0) {
-		pr_err("kfd: failed to allocate runlist IB\n");
+		pr_err("Failed to allocate runlist IB\n");
 		return retval;
 	}
@@ -152,8 +152,6 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_map_process *)buffer;
-	pr_debug("kfd: In func %s\n", __func__);
 	memset(buffer, 0, sizeof(struct pm4_map_process));
 	packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
@@ -189,8 +187,6 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
 	BUG_ON(!pm || !buffer || !q);
-	pr_debug("kfd: In func %s\n", __func__);
 	packet = (struct pm4_mes_map_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_map_queues));
@@ -223,8 +219,7 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
 		use_static = false; /* no static queues under SDMA */
 		break;
 	default:
-		pr_err("kfd: in %s queue type %d\n", __func__,
-				q->properties.type);
+		pr_err("queue type %d\n", q->properties.type);
 		BUG();
 		break;
 	}
@@ -254,8 +249,6 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 	BUG_ON(!pm || !buffer || !q);
-	pr_debug("kfd: In func %s\n", __func__);
 	packet = (struct pm4_map_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_map_queues));
@@ -333,8 +326,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	*rl_size_bytes = alloc_size_bytes;
-	pr_debug("kfd: In func %s\n", __func__);
-	pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);
 	/* build the run list ib packet */
@@ -342,7 +334,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		qpd = cur->qpd;
 		/* build map process packet */
 		if (proccesses_mapped >= pm->dqm->processes_count) {
-			pr_debug("kfd: not enough space left in runlist IB\n");
+			pr_debug("Not enough space left in runlist IB\n");
 			pm_release_ib(pm);
 			return -ENOMEM;
 		}
@@ -359,7 +351,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!kq->queue->properties.is_active)
 				continue;
-			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);
 			if (pm->dqm->dev->device_info->asic_family ==
@@ -385,7 +377,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!q->properties.is_active)
 				continue;
-			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);
 			if (pm->dqm->dev->device_info->asic_family ==
@@ -409,7 +401,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		}
 	}
-	pr_debug("kfd: finished map process and queues to runlist\n");
+	pr_debug("Finished map process and queues to runlist\n");
 	if (is_over_subscription)
 		pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
@@ -453,15 +445,13 @@ int pm_send_set_resources(struct packet_manager *pm,
 	BUG_ON(!pm || !res);
-	pr_debug("kfd: In func %s\n", __func__);
 	mutex_lock(&pm->lock);
 	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			sizeof(*packet) / sizeof(uint32_t),
			(unsigned int **)&packet);
 	if (packet == NULL) {
 		mutex_unlock(&pm->lock);
-		pr_err("kfd: failed to allocate buffer on kernel queue\n");
+		pr_err("Failed to allocate buffer on kernel queue\n");
 		return -ENOMEM;
 	}
@@ -504,7 +494,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	if (retval != 0)
 		goto fail_create_runlist_ib;
-	pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
 	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
 	mutex_lock(&pm->lock);
@@ -595,7 +585,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	packet = (struct pm4_unmap_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_unmap_queues));
-	pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+	pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
		mode, reset, type);
 	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
			sizeof(struct pm4_unmap_queues));

drivers/gpu/drm/amd/amdkfd/kfd_process.c

@@ -101,7 +101,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
 	/* A prior open of /dev/kfd could have already created the process. */
 	process = find_process(thread);
 	if (process)
-		pr_debug("kfd: process already found\n");
+		pr_debug("Process already found\n");
 
 	if (!process)
 		process = create_process(thread);
@@ -250,7 +250,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
 
 		if (pdd->reset_wavefronts) {
-			pr_warn("amdkfd: Resetting all wave fronts\n");
+			pr_warn("Resetting all wave fronts\n");
 			dbgdev_wave_reset_wavefronts(pdd->dev, p);
 			pdd->reset_wavefronts = false;
 		}

drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c

@@ -51,15 +51,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
 
 	BUG_ON(!pqm || !qid);
 
-	pr_debug("kfd: in %s\n", __func__);
-
 	found = find_first_zero_bit(pqm->queue_slot_bitmap,
 			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
-	pr_debug("kfd: the new slot id %lu\n", found);
+	pr_debug("The new slot id %lu\n", found);
 
 	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
-		pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+		pr_info("Cannot open more queues for process with pasid %d\n",
 				pqm->process->pasid);
 		return -ENOMEM;
 	}
@@ -92,8 +90,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
 
 	BUG_ON(!pqm);
 
-	pr_debug("In func %s\n", __func__);
-
 	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
 		retval = pqm_destroy_queue(
 				pqm,
@@ -102,7 +98,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
 				pqn->kq->queue->properties.queue_id);
 
 		if (retval != 0) {
-			pr_err("kfd: failed to destroy queue\n");
+			pr_err("failed to destroy queue\n");
 			return;
 		}
 	}
@@ -136,7 +132,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
 	(*q)->device = dev;
 	(*q)->process = pqm->process;
 
-	pr_debug("kfd: PQM After init queue");
+	pr_debug("PQM After init queue");
 
 	return retval;
@@ -210,7 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
 	((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
 	(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
-		pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+		pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
 		retval = -EPERM;
 		goto err_create_queue;
 	}
@@ -243,17 +239,17 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_debug("Error dqm create queue\n");
+		pr_err("DQM create queue failed\n");
 		goto err_create_queue;
 	}
 
-	pr_debug("kfd: PQM After DQM create queue\n");
+	pr_debug("PQM After DQM create queue\n");
 
 	list_add(&pqn->process_queue_list, &pqm->queues);
 
 	if (q) {
 		*properties = q->properties;
-		pr_debug("kfd: PQM done creating queue\n");
+		pr_debug("PQM done creating queue\n");
 		print_queue_properties(properties);
 	}
@@ -282,11 +278,9 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	BUG_ON(!pqm);
 	retval = 0;
 
-	pr_debug("kfd: In Func %s\n", __func__);
-
 	pqn = get_queue_by_qid(pqm, qid);
 	if (pqn == NULL) {
-		pr_err("kfd: queue id does not match any known queue\n");
+		pr_err("Queue id does not match any known queue\n");
 		return -EINVAL;
 	}
@@ -339,8 +333,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	pqn = get_queue_by_qid(pqm, qid);
 	if (!pqn) {
-		pr_debug("amdkfd: No queue %d exists for update operation\n",
-				qid);
+		pr_debug("No queue %d exists for update operation\n", qid);
 		return -EFAULT;
 	}

drivers/gpu/drm/amd/amdkfd/kfd_topology.c

@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			dev->node_props.simd_count);
 
 	if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
-		pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
+		pr_info_once("mem_banks_count truncated from %d to %d\n",
 				dev->node_props.mem_banks_count,
 				dev->mem_bank_count);
 		sysfs_show_32bit_prop(buffer, "mem_banks_count",
@@ -1147,7 +1147,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
 	gpu_id = kfd_generate_gpu_id(gpu);
 
-	pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
 
 	down_write(&topology_lock);
 	/*
@@ -1190,7 +1190,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
 		dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
-		pr_info("amdkfd: adding doorbell packet type capability\n");
+		pr_info("Adding doorbell packet type capability\n");
 	}
 
 	res = 0;