From 435af0b919bf9eb78f4e05e8596ebed9ca7885b7 Mon Sep 17 00:00:00 2001
From: Philip Yang
Date: Mon, 24 Apr 2023 14:46:16 -0400
Subject: drm/amdkfd: Optimize svm range map to GPU with XNACK on

With XNACK on, if svm_range_set_attr sets the range access or
access_in_place attribute, we don't call svm_range_validate_and_map to
update the GPU mapping. This avoids prefaulting the range pages in
system memory if the range is not prefetched to VRAM and not mapped to
GPUs.

Signed-off-by: Philip Yang
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c')

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 96a138a39515..c02430537e9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -735,7 +735,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
 		case KFD_IOCTL_SVM_ATTR_ACCESS:
 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
-			*update_mapping = true;
+			if (!p->xnack_enabled)
+				*update_mapping = true;
+
 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
 							       attrs[i].value);
 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
--
cgit v1.2.3


From 8dc1db3172ae2f17ae71e33b608a33411ce8a1aa Mon Sep 17 00:00:00 2001
From: Mukul Joshi
Date: Wed, 14 Sep 2022 16:39:48 +0800
Subject: drm/amdkfd: Introduce kfd_node struct (v5)

Introduce a new structure, kfd_node, which will now represent a compute
node. kfd_node is carved out of the kfd_dev structure. The kfd_dev
struct now becomes the parent of kfd_node and stores common resources
such as doorbells, the GTT sub-allocator, etc. The kfd_node struct
stores all resources specific to a compute node, such as the device
queue manager, interrupt handling, etc.

This is the first step in adding compute partition support in KFD.
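As a rough, condensed sketch of the split described above (not the full
kernel definitions; only fields that appear in the hunks below are named,
with embedded structures reduced to opaque pointers):

struct kfd_node;

/* Parent device: one per PCI device.  Keeps the resources that remain
 * shared across compute nodes: doorbells, the GTT sub-allocator,
 * device_info, local_mem_info, CWSR/IOMMU state, firmware versions.
 */
struct kfd_dev {
	struct amdgpu_device *adev;
	struct kfd_node *node;		/* a single node in this patch; the
					 * commit message hints more will
					 * follow for compute partitions */
};

/* Per-compute-node state carved out of kfd_dev. */
struct kfd_node {
	struct amdgpu_device *adev;		/* same adev as the parent */
	struct kfd_dev *kfd;			/* back-pointer to the parent */
	const struct kfd2kgd_calls *kfd2kgd;
	struct device_queue_manager *dqm;	/* queue management is per node */
	void *gws;				/* GWS is allocated per node */
	/* also per node: vm_info, max_proc_per_quantum, sram_ecc_flag,
	 * the interrupt path (ih_fifo, ih_wq, interrupt_work,
	 * interrupt_lock, interrupts_active) and the SMI client list
	 * (smi_clients, smi_lock)
	 */
};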
v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 + drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 43 ++-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 28 +-- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 258 +++++++++++++-------- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 100 ++++---- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 12 +- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64 ++--- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 22 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 24 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 168 ++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 54 ++--- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 20 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 40 ++-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 56 ++--- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 8 +- 38 files changed, 573 insertions(+), 495 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index de6ba0d4b860..af37f2ef4438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -35,6 +35,7 @@ #include "amdgpu_dma_buf.h" #include #include "amdgpu_xgmi.h" +#include "kfd_priv.h" #include "kfd_smi_events.h" #include diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 5c8023cba196..4ebfff6b6c55 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -26,7 +26,7 @@ #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" -static bool cik_event_interrupt_isr(struct kfd_dev *dev, +static bool cik_event_interrupt_isr(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -85,7 +85,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } 
-static void cik_event_interrupt_wq(struct kfd_dev *dev, +static void cik_event_interrupt_wq(struct kfd_node *dev, const uint32_t *ih_ring_entry) { const struct cik_ih_ring_entry *ihre = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 81d07ecf666d..eb0b0b38f10e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -293,7 +293,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_create_queue_args *args = data; - struct kfd_dev *dev; + struct kfd_node *dev; int err = 0; unsigned int queue_id; struct kfd_process_device *pdd; @@ -328,7 +328,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) { err = -ENOMEM; goto err_alloc_doorbells; } @@ -336,7 +336,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) */ - if (dev->shared_resources.enable_mes && + if (dev->kfd->shared_resources.enable_mes && ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { struct amdgpu_bo_va_mapping *wptr_mapping; @@ -887,7 +887,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, { struct kfd_ioctl_set_scratch_backing_va_args *args = data; struct kfd_process_device *pdd; - struct kfd_dev *dev; + struct kfd_node *dev; long err; mutex_lock(&p->mutex); @@ -1006,18 +1006,18 @@ err_drm_file: return ret; } -bool kfd_dev_is_large_bar(struct kfd_dev *dev) +bool kfd_dev_is_large_bar(struct kfd_node *dev) { if (debug_largebar) { pr_debug("Simulate large-bar allocation on non large-bar machine\n"); return true; } - if (dev->use_iommu_v2) + if (dev->kfd->use_iommu_v2) return false; - if (dev->local_mem_info.local_mem_size_private == 0 && - dev->local_mem_info.local_mem_size_public > 0) + if (dev->kfd->local_mem_info.local_mem_size_private == 0 && + dev->kfd->local_mem_info.local_mem_size_public > 0) return true; return false; } @@ -1041,7 +1041,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; struct kfd_process_device *pdd; void *mem; - struct kfd_dev *dev; + struct kfd_node *dev; int idr_handle; long err; uint64_t offset = args->mmap_offset; @@ -1105,7 +1105,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (args->size != kfd_doorbell_process_slice(dev)) { + if (args->size != kfd_doorbell_process_slice(dev->kfd)) { err = -EINVAL; goto err_unlock; } @@ -1231,7 +1231,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, struct kfd_ioctl_map_memory_to_gpu_args *args = data; struct kfd_process_device *pdd, *peer_pdd; void *mem; - struct kfd_dev *dev; + struct kfd_node *dev; long err = 0; int i; uint32_t *devices_arr = NULL; @@ -1405,7 +1405,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, args->n_success = i+1; } - flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev); + flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); if (flush_tlb) { err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, (struct kgd_mem *) mem, true); @@ -1445,7 +1445,7 @@ static int 
kfd_ioctl_alloc_queue_gws(struct file *filep, int retval; struct kfd_ioctl_alloc_queue_gws_args *args = data; struct queue *q; - struct kfd_dev *dev; + struct kfd_node *dev; mutex_lock(&p->mutex); q = pqm_get_user_queue(&p->pqm, args->queue_id); @@ -1482,7 +1482,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_get_dmabuf_info_args *args = data; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; @@ -1596,7 +1596,7 @@ static int kfd_ioctl_export_dmabuf(struct file *filep, struct kfd_ioctl_export_dmabuf_args *args = data; struct kfd_process_device *pdd; struct dma_buf *dmabuf; - struct kfd_dev *dev; + struct kfd_node *dev; void *mem; int ret = 0; @@ -2178,7 +2178,7 @@ static int criu_restore_devices(struct kfd_process *p, } for (i = 0; i < args->num_devices; i++) { - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; struct file *drm_file; @@ -2240,7 +2240,7 @@ static int criu_restore_devices(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } @@ -2268,7 +2268,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, u64 offset; if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev)) + if (bo_bucket->size != + kfd_doorbell_process_slice(pdd->dev->kfd)) return -EINVAL; offset = kfd_get_process_doorbells(pdd); @@ -2350,7 +2351,7 @@ static int criu_restore_bo(struct kfd_process *p, /* now map these BOs to GPU/s */ for (j = 0; j < p->n_pdds; j++) { - struct kfd_dev *peer; + struct kfd_node *peer; struct kfd_process_device *peer_pdd; if (!bo_priv->mapped_gpuids[j]) @@ -2947,7 +2948,7 @@ err_i1: return retcode; } -static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, +static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -2981,7 +2982,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; unsigned long mmap_offset; unsigned int gpu_id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 475e47027354..f5aebba31e88 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1405,7 +1405,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, return i; } -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info) +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; @@ -1524,7 +1524,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): num_of_cache_types = - kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info); + kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); break; default: *pcache_info = dummy_cache_info; @@ -1858,7 +1858,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) } static int kfd_fill_gpu_memory_affinity(int *avail_size, - struct kfd_dev 
*kdev, uint8_t type, uint64_t size, + struct kfd_node *kdev, uint8_t type, uint64_t size, struct crat_subtype_memory *sub_type_hdr, uint32_t proximity_domain, const struct kfd_local_mem_info *local_mem_info) @@ -1887,7 +1887,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size, } #ifdef CONFIG_ACPI_NUMA -static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) +static void kfd_find_numa_node_in_srat(struct kfd_node *kdev) { struct acpi_table_header *table_header = NULL; struct acpi_subtable_header *sub_header = NULL; @@ -1982,7 +1982,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) * Return 0 if successful else return -ve value */ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, - struct kfd_dev *kdev, + struct kfd_node *kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { @@ -2044,8 +2044,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, } static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, - struct kfd_dev *kdev, - struct kfd_dev *peer_kdev, + struct kfd_node *kdev, + struct kfd_node *peer_kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain_from, uint32_t proximity_domain_to) @@ -2081,7 +2081,7 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, * [OUT] actual size of data filled in crat_image */ static int kfd_create_vcrat_image_gpu(void *pcrat_image, - size_t *size, struct kfd_dev *kdev, + size_t *size, struct kfd_node *kdev, uint32_t proximity_domain) { struct crat_header *crat_table = (struct crat_header *)pcrat_image; @@ -2153,7 +2153,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, /* Check if this node supports IOMMU. During parsing this flag will * translate to HSA_CAP_ATS_PRESENT */ - if (!kfd_iommu_check_device(kdev)) + if (!kfd_iommu_check_device(kdev->kfd)) cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; crat_table->length += sub_type_hdr->length; @@ -2164,7 +2164,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - local_mem_info = kdev->local_mem_info; + local_mem_info = kdev->kfd->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); @@ -2216,12 +2216,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (from other GPU to this GPU) will be added * in kfd_parse_subtype_iolink. */ - if (kdev->hive_id) { + if (kdev->kfd->hive_id) { for (nid = 0; nid < proximity_domain; ++nid) { peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); if (!peer_dev->gpu) continue; - if (peer_dev->gpu->hive_id != kdev->hive_id) + if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + @@ -2255,12 +2255,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU * -- this option is not currently implemented. 
* The assumption is that all AMD APUs will have CRAT - * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU + * @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU * * Return 0 if successful else return -ve value */ int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain) { void *pcrat_image = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 8d1e8ba58dee..3d0e533b93b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -293,7 +293,7 @@ struct crat_subtype_generic { #pragma pack() -struct kfd_dev; +struct kfd_node; /* Static table to describe GPU Cache information */ struct kfd_gpu_cache_info { @@ -305,14 +305,14 @@ struct kfd_gpu_cache_info { */ uint32_t num_cu_shared; }; -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info); +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info); int kfd_create_crat_image_acpi(void **crat_image, size_t *size); void kfd_destroy_crat_image(void *crat_image); int kfd_parse_crat_table(void *crat_image, struct list_head *device_list, uint32_t proximity_domain); int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain); #endif /* KFD_CRAT_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index ad5a40a685ac..4a5a0a4e00f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -43,7 +43,7 @@ static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data) static ssize_t kfd_debugfs_hang_hws_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) { - struct kfd_dev *dev; + struct kfd_node *dev; char tmp[16]; uint32_t gpu_id; int ret = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1510041a6ee1..23d9a7f77055 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -61,7 +61,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume_iommu(struct kfd_dev *kfd); -static int kfd_resume(struct kfd_dev *kfd); +static int kfd_resume(struct kfd_node *kfd); static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) { @@ -441,8 +441,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) memset(&kfd->doorbell_available_index, 0, sizeof(kfd->doorbell_available_index)); - atomic_set(&kfd->sram_ecc_flag, 0); - ida_init(&kfd->doorbell_ida); return kfd; @@ -489,41 +487,106 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } } -static int kfd_gws_init(struct kfd_dev *kfd) +static int kfd_gws_init(struct kfd_node *node) { int ret = 0; + struct kfd_dev *kfd = node->kfd; - if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) + if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support || (KFD_IS_SOC15(kfd) && - ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + if (hws_gws_support || (KFD_IS_SOC15(node) && + ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) && kfd->mec2_fw_version >= 0x81b3) || - (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) && 
kfd->mec2_fw_version >= 0x1b3) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) && kfd->mec2_fw_version >= 0x30) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) && kfd->mec2_fw_version >= 0x28) || - (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0) - && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0) + (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) + && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) && kfd->mec2_fw_version >= 0x6b)))) - ret = amdgpu_amdkfd_alloc_gws(kfd->adev, - kfd->adev->gds.gws_size, &kfd->gws); + ret = amdgpu_amdkfd_alloc_gws(node->adev, + node->adev->gds.gws_size, &node->gws); return ret; } -static void kfd_smi_init(struct kfd_dev *dev) +static void kfd_smi_init(struct kfd_node *dev) { INIT_LIST_HEAD(&dev->smi_clients); spin_lock_init(&dev->smi_lock); } +static int kfd_init_node(struct kfd_node *node) +{ + int err = -1; + + if (kfd_interrupt_init(node)) { + dev_err(kfd_device, "Error initializing interrupts\n"); + goto kfd_interrupt_error; + } + + node->dqm = device_queue_manager_init(node); + if (!node->dqm) { + dev_err(kfd_device, "Error initializing queue manager\n"); + goto device_queue_manager_error; + } + + if (kfd_gws_init(node)) { + dev_err(kfd_device, "Could not allocate %d gws\n", + node->adev->gds.gws_size); + goto gws_error; + } + + if (kfd_resume(node)) + goto kfd_resume_error; + + if (kfd_topology_add_device(node)) { + dev_err(kfd_device, "Error adding device to topology\n"); + goto kfd_topology_add_device_error; + } + + kfd_smi_init(node); + + return 0; + +kfd_topology_add_device_error: +kfd_resume_error: +gws_error: + device_queue_manager_uninit(node->dqm); +device_queue_manager_error: + kfd_interrupt_exit(node); +kfd_interrupt_error: + if (node->gws) + amdgpu_amdkfd_free_gws(node->adev, node->gws); + + /* Cleanup the node memory here */ + kfree(node); + return err; +} + +static void kfd_cleanup_node(struct kfd_dev *kfd) +{ + struct kfd_node *knode = kfd->node; + + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->node = NULL; +} + bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size, map_process_packet_size; + struct kfd_node *node; + uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; + unsigned int max_proc_per_quantum; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -533,10 +596,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - - kfd->vm_info.first_vmid_kfd + 1; + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 
* 32 and 64-bit requests are possible and must be @@ -557,9 +619,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) - kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); else - kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; + max_proc_per_quantum = vmid_num_kfd; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * @@ -609,26 +671,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->noretry = kfd->adev->gmc.noretry; - if (kfd_interrupt_init(kfd)) { - dev_err(kfd_device, "Error initializing interrupts\n"); - goto kfd_interrupt_error; - } - - kfd->dqm = device_queue_manager_init(kfd); - if (!kfd->dqm) { - dev_err(kfd_device, "Error initializing queue manager\n"); - goto device_queue_manager_error; - } - - /* If supported on this device, allocate global GWS that is shared - * by all KFD processes - */ - if (kfd_gws_init(kfd)) { - dev_err(kfd_device, "Could not allocate %d gws\n", - kfd->adev->gds.gws_size); - goto gws_error; - } - /* If CRAT is broken, won't set iommu enabled */ kfd_double_confirm_iommu_support(kfd); @@ -642,46 +684,54 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - if (kfd_resume_iommu(kfd)) - goto device_iommu_error; - - if (kfd_resume(kfd)) - goto kfd_resume_error; - - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); + /* Allocate the KFD node */ + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) { + dev_err(kfd_device, "Error allocating KFD node\n"); + goto node_alloc_error; + } - if (kfd_topology_add_device(kfd)) { - dev_err(kfd_device, "Error adding device to topology\n"); - goto kfd_topology_add_device_error; + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; } + kfd->node = node; - kfd_smi_init(kfd); + if (kfd_resume_iommu(kfd)) + goto kfd_resume_iommu_error; + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); pr_debug("Starting kfd with the following scheduling policy %d\n", - kfd->dqm->sched_policy); + node->dqm->sched_policy); goto out; -kfd_topology_add_device_error: -kfd_resume_error: +kfd_resume_iommu_error: + kfd_cleanup_node(kfd); +node_init_error: +node_alloc_error: device_iommu_error: -gws_error: - device_queue_manager_uninit(kfd->dqm); -device_queue_manager_error: - kfd_interrupt_exit(kfd); -kfd_interrupt_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); alloc_gtt_mem_failure: - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); @@ -692,15 +742,11 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - device_queue_manager_uninit(kfd->dqm); - kfd_interrupt_exit(kfd); - 
kfd_topology_remove_device(kfd); + kfd_cleanup_node(kfd); kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -708,16 +754,18 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(kfd, false); + kfd_smi_event_update_gpu_reset(node, false); - kfd->dqm->ops.pre_reset(kfd->dqm); + node->dqm->ops.pre_reset(node->dqm); kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(kfd); + kfd_signal_reset_event(node); return 0; } @@ -730,18 +778,19 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; atomic_dec(&kfd_locked); - atomic_set(&kfd->sram_ecc_flag, 0); + atomic_set(&node->sram_ecc_flag, 0); - kfd_smi_event_update_gpu_reset(kfd, true); + kfd_smi_event_update_gpu_reset(node, true); return 0; } @@ -753,6 +802,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return; @@ -763,18 +814,19 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - kfd->dqm->ops.stop(kfd->dqm); + node->dqm->ops.stop(node->dqm); kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { int ret, count; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; @@ -809,15 +861,15 @@ static int kfd_resume_iommu(struct kfd_dev *kfd) return err; } -static int kfd_resume(struct kfd_dev *kfd) +static int kfd_resume(struct kfd_node *node) { int err = 0; - err = kfd->dqm->ops.start(kfd->dqm); + err = node->dqm->ops.start(node->dqm); if (err) dev_err(kfd_device, "Error starting queue manager for device %x:%x\n", - kfd->adev->pdev->vendor, kfd->adev->pdev->device); + node->adev->pdev->vendor, node->adev->pdev->device); return err; } @@ -843,6 +895,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; bool is_patched = false; unsigned long flags; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return; @@ -852,16 +905,16 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); - if (kfd->interrupts_active - && interrupt_is_wanted(kfd, ih_ring_entry, + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, patched_ihre, &is_patched) - && enqueue_ih_ring_entry(kfd, + && enqueue_ih_ring_entry(node, is_patched ? 
patched_ihre : ih_ring_entry)) - kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); + kfd_queue_work(node->ih_wq, &node->interrupt_work); - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -999,10 +1052,11 @@ static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); } -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj) { unsigned int found, start_search, cur_size; + struct kfd_dev *kfd = node->kfd; if (size == 0) return -EINVAL; @@ -1102,8 +1156,10 @@ kfd_gtt_no_free_chunk: return -ENOMEM; } -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) { + struct kfd_dev *kfd = node->kfd; + /* Act like kfree when trying to free a NULL object */ if (!mem_obj) return 0; @@ -1126,28 +1182,28 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { if (kfd) - atomic_inc(&kfd->sram_ecc_flag); + atomic_inc(&kfd->node->sram_ecc_flag); } -void kfd_inc_compute_active(struct kfd_dev *kfd) +void kfd_inc_compute_active(struct kfd_node *node) { - if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->adev, false); + if (atomic_inc_return(&node->kfd->compute_profile) == 1) + amdgpu_amdkfd_set_compute_idle(node->adev, false); } -void kfd_dec_compute_active(struct kfd_dev *kfd) +void kfd_dec_compute_active(struct kfd_node *node) { - int count = atomic_dec_return(&kfd->compute_profile); + int count = atomic_dec_return(&node->kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->adev, true); + amdgpu_amdkfd_set_compute_idle(node->adev, true); WARN_ONCE(count < 0, "Compute profile ref. count error"); } void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and @@ -1155,19 +1211,19 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) * When the device has more than two engines, we reserve two for PCIe to enable * full-duplex and the rest are used as XGMI. 
*/ -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ - if (!kdev->adev->gmc.xgmi.supported) - return kdev->adev->sdma.num_instances; + if (!node->adev->gmc.xgmi.supported) + return node->adev->sdma.num_instances; - return min(kdev->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances, 2); } -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); + return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) @@ -1175,7 +1231,7 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) /* This function will send a package to HIQ to hang the HWS * which will trigger a GPU reset and bring the HWS back to normal state */ -int kfd_debugfs_hang_hws(struct kfd_dev *dev) +int kfd_debugfs_hang_hws(struct kfd_node *dev) { if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { pr_err("HWS is not enabled"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 7a95698d83f7..34977d89f01c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -74,31 +74,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) { int i; - int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec - + pipe) * dqm->dev->shared_resources.num_queue_per_pipe; + int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec + + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; /* queue is available for KFD usage if bit is 1 */ - for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) + for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) if (test_bit(pipe_offset + i, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) return true; return false; } unsigned int get_cp_queues_num(struct device_queue_manager *dqm) { - return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap, + return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, KGD_MAX_QUEUES); } unsigned int get_queues_per_pipe(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_queue_per_pipe; + return dqm->dev->kfd->shared_resources.num_queue_per_pipe; } unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_pipe_per_mec; + return dqm->dev->kfd->shared_resources.num_pipe_per_mec; } static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) @@ -110,18 +110,18 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_xgmi_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } static 
inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) { - return dqm->dev->device_info.reserved_sdma_queues_bitmap; + return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -330,7 +330,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q, uint32_t const *restore_id) { - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to @@ -349,7 +349,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, * for a SDMA engine is 512. */ - uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx; + uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET @@ -382,7 +382,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, } q->properties.doorbell_off = - kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd), + kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd), q->doorbell_id); return 0; } @@ -391,7 +391,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { unsigned int old; - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || @@ -441,7 +441,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -460,7 +460,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, return 0; } -static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, +static int flush_texture_cache_nocpsch(struct kfd_node *kdev, struct qcm_process_device *qpd) { const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf; @@ -661,7 +661,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm, #define SQ_IND_CMD_CMD_KILL 0x00000003 #define SQ_IND_CMD_MODE_BROADCAST 0x00000001 -static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) +static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; @@ -837,7 +837,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, /* Make sure the queue is unmapped before updating the MQD */ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); else if (prev_active) @@ -858,7 +858,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? 
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -895,7 +895,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = map_queues_cpsch(dqm); else if (q->properties.is_active) retval = add_queue_mes(dqm, q, &pdd->qpd); @@ -951,7 +951,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, continue; retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -993,7 +993,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = false; decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to evict queue %d\n", @@ -1003,7 +1003,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, } } pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : @@ -1132,7 +1132,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = true; increment_queue_count(dqm, &pdd->qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = add_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to restore queue %d\n", @@ -1141,7 +1141,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); qpd->evicted = 0; @@ -1282,7 +1282,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) if (test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) dqm->allocated_queues[pipe] |= 1 << queue; } @@ -1426,14 +1426,14 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { - mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) - / dqm->dev->shared_resources.num_pipe_per_mec; + mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) + / dqm->dev->kfd->shared_resources.num_pipe_per_mec; - if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap)) + if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; /* only acquire queues from the first MEC */ @@ -1489,7 +1489,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { retval = pm_init(&dqm->packet_mgr, dqm); if (retval) goto fail_packet_manager_init; @@ -1516,14 +1516,14 @@ static int start_cpsch(struct device_queue_manager *dqm) 
dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); return 0; fail_allocate_vidmem: fail_set_sched_resources: - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, false); fail_packet_manager_init: dqm_unlock(dqm); @@ -1541,7 +1541,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->is_hws_hang) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); else remove_all_queues_mes(dqm); @@ -1550,11 +1550,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_release_ib(&dqm->packet_mgr); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, hanging); dqm_unlock(dqm); @@ -1673,7 +1673,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.is_active) { increment_queue_count(dqm, qpd, q); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); else @@ -1893,7 +1893,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -2056,7 +2056,7 @@ static int get_wave_state(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || - q->properties.is_active || !q->device->cwsr_enabled || + q->properties.is_active || !q->device->kfd->cwsr_enabled || !mqd_mgr->get_wave_state) { dqm_unlock(dqm); return -EINVAL; @@ -2105,7 +2105,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm, dqm_lock(dqm); - if (q->properties.is_active || !q->device->cwsr_enabled) { + if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { r = -EINVAL; goto dqm_unlock; } @@ -2158,7 +2158,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, if (q->properties.is_active) { decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) pr_err("Failed to remove queue %d\n", @@ -2180,7 +2180,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, filter, 0); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { @@ -2242,11 +2242,11 @@ out_free: static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) { int retval; - struct kfd_dev *dev = dqm->dev; + struct kfd_node *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = 
dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, @@ -2256,7 +2256,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) return retval; } -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) { struct device_queue_manager *dqm; @@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (init_mqd_managers(dqm)) goto out_free; - if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { + if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { pr_err("Failed to allocate hiq sdma mqd trunk buffer\n"); goto out_free; } @@ -2386,7 +2386,7 @@ out_free: return NULL; } -static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, +static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd) { WARN(!mqd, "No hiq sdma mqd trunk to free"); @@ -2397,7 +2397,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { dqm->ops.uninitialize(dqm); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); kfree(dqm); } @@ -2479,7 +2479,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { if (!test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; r = dqm->dev->kfd2kgd->hqd_dump( @@ -2497,7 +2497,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info.num_sdma_queues_per_engine; + queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->adev, pipe, queue, &dump, &n_regs); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a537b9ef3e16..e554a48f3054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -207,7 +207,7 @@ struct device_queue_manager_asic_ops { struct queue *q, struct qcm_process_device *qpd); struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); }; /** @@ -228,7 +228,7 @@ struct device_queue_manager { struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX]; struct packet_manager packet_mgr; - struct kfd_dev *dev; + struct kfd_node *dev; struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */ struct list_head queues; unsigned int saved_flags; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 914d94679d73..8af643388768 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -60,10 +60,10 @@ static int update_qpd_v9(struct device_queue_manager *dqm, qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) + if (dqm->dev->kfd->noretry 
&& !dqm->dev->kfd->use_iommu_v2) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 38c9e1ca6691..6421b620388d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -138,7 +138,7 @@ void kfd_doorbell_fini(struct kfd_dev *kfd) iounmap(kfd->doorbell_kernel_ptr); } -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -148,7 +148,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, * For simplicitly we only allow mapping of the entire doorbell * allocation of a single device & process. */ - if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev)) + if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd)) return -EINVAL; pdd = kfd_get_process_device_data(dev, process); @@ -170,13 +170,13 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, " vm_flags == 0x%04lX\n" " size == 0x%04lX\n", (unsigned long long) vma->vm_start, address, vma->vm_flags, - kfd_doorbell_process_slice(dev)); + kfd_doorbell_process_slice(dev->kfd)); return io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - kfd_doorbell_process_slice(dev), + kfd_doorbell_process_slice(dev->kfd), vma->vm_page_prot); } @@ -278,14 +278,14 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd) phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) { if (!pdd->doorbell_index) { - int r = kfd_alloc_process_doorbells(pdd->dev, + int r = kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index); if (r < 0) return 0; } - return pdd->dev->doorbell_base + - pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev); + return pdd->dev->kfd->doorbell_base + + pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev->kfd); } int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index c894cf8f7c50..9926186f88a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -348,7 +348,7 @@ static int kfd_event_page_set(struct kfd_process *p, void *kernel_address, int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset) { - struct kfd_dev *kfd; + struct kfd_node *kfd; struct kfd_process_device *pdd; void *mem, *kern_addr; uint64_t size; @@ -1125,7 +1125,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, } #ifdef KFD_SUPPORT_IOMMU_V2 -void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested) { @@ -1221,8 +1221,8 @@ void kfd_signal_hw_exception_event(u32 pasid) kfd_unref_process(p); } -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, - struct kfd_vm_fault_info *info) +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, + struct kfd_vm_fault_info *info) { struct kfd_event *ev; uint32_t id; @@ -1269,7 +1269,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, kfd_unref_process(p); } -void 
kfd_signal_reset_event(struct kfd_dev *dev) +void kfd_signal_reset_event(struct kfd_node *dev) { struct kfd_hsa_hw_exception_data hw_exception_data; struct kfd_hsa_memory_exception_data memory_exception_data; @@ -1325,7 +1325,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev) srcu_read_unlock(&kfd_processes_srcu, idx); } -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid) +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) { struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); struct kfd_hsa_memory_exception_data memory_exception_data; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 8aebe408c544..da2ca00d79e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -322,21 +322,21 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - if (!pdd->dev->use_iommu_v2) { + if (!pdd->dev->kfd->use_iommu_v2) { /* dGPUs: SVM aperture starting at 0 * with small reserved space for kernel. * Set them to CANONICAL addresses. */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; } else { /* set them to non CANONICAL addresses, and no SVM is * allocated. */ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1); pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base, - pdd->dev->shared_resources.gpuvm_size); + pdd->dev->kfd->shared_resources.gpuvm_size); } pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); @@ -356,7 +356,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); @@ -365,7 +365,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) int kfd_init_apertures(struct kfd_process *process) { uint8_t id = 0; - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; /*Iterating over all devices*/ @@ -417,7 +417,7 @@ int kfd_init_apertures(struct kfd_process *process) } } - if (!dev->use_iommu_v2) { + if (!dev->kfd->use_iommu_v2) { /* dGPUs: the reserved space for kernel * before SVM */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 0d53f6067422..0f0fdea4cd8a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -187,7 +187,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID)); } -static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, uint16_t pasid, uint16_t source_id) { int ret = -EINVAL; @@ -225,7 +225,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); } -static bool event_interrupt_isr_v11(struct kfd_dev *dev, +static bool event_interrupt_isr_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -274,7 +274,7 @@ static bool 
event_interrupt_isr_v11(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v11(struct kfd_dev *dev, +static void event_interrupt_wq_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, ring_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 0b75a37b689b..861bccb1e9dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -90,7 +90,7 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 -static void event_interrupt_poison_consumption_v9(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { int old_poison, ret = -EINVAL; @@ -160,7 +160,7 @@ static bool context_id_expected(struct kfd_dev *dev) } } -static bool event_interrupt_isr_v9(struct kfd_dev *dev, +static bool event_interrupt_isr_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -206,7 +206,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, *patched_flag = true; memcpy(patched_ihre, ih_ring_entry, - dev->device_info.ih_ring_entry_size); + dev->kfd->device_info.ih_ring_entry_size); pasid = dev->dqm->vmid_pasid[vmid]; @@ -235,7 +235,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, uint32_t context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); - if (context_id == 0 && context_id_expected(dev)) + if (context_id == 0 && context_id_expected(dev->kfd)) return false; } @@ -253,7 +253,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v9(struct kfd_dev *dev, +static void event_interrupt_wq_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index 34772fe74296..dd3c43c1ad70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -50,29 +50,29 @@ static void interrupt_wq(struct work_struct *); -int kfd_interrupt_init(struct kfd_dev *kfd) +int kfd_interrupt_init(struct kfd_node *node) { int r; - r = kfifo_alloc(&kfd->ih_fifo, - KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size, + r = kfifo_alloc(&node->ih_fifo, + KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size, GFP_KERNEL); if (r) { - dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n"); + dev_err(node->adev->dev, "Failed to allocate IH fifo\n"); return r; } - kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); - if (unlikely(!kfd->ih_wq)) { - kfifo_free(&kfd->ih_fifo); - dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n"); + node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!node->ih_wq)) { + kfifo_free(&node->ih_fifo); + dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n"); return -ENOMEM; } - spin_lock_init(&kfd->interrupt_lock); + spin_lock_init(&node->interrupt_lock); - INIT_WORK(&kfd->interrupt_work, interrupt_wq); + INIT_WORK(&node->interrupt_work, interrupt_wq); - kfd->interrupts_active = true; + node->interrupts_active = true; /* * After this function returns, the interrupt will be enabled. 
This @@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd) return 0; } -void kfd_interrupt_exit(struct kfd_dev *kfd) +void kfd_interrupt_exit(struct kfd_node *node) { /* * Stop the interrupt handler from writing to the ring and scheduling @@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd) */ unsigned long flags; - spin_lock_irqsave(&kfd->interrupt_lock, flags); - kfd->interrupts_active = false; - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); + node->interrupts_active = false; + spin_unlock_irqrestore(&node->interrupt_lock, flags); /* * flush_work ensures that there are no outstanding * work-queue items that will access interrupt_ring. New work items * can't be created because we stopped interrupt handling above. */ - flush_workqueue(kfd->ih_wq); + flush_workqueue(node->ih_wq); - kfifo_free(&kfd->ih_fifo); + kfifo_free(&node->ih_fifo); } /* * Assumption: single reader/writer. This function is not re-entrant */ -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) +bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry) { int count; - count = kfifo_in(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); - if (count != kfd->device_info.ih_ring_entry_size) { - dev_dbg_ratelimited(kfd->adev->dev, + count = kfifo_in(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); + if (count != node->kfd->device_info.ih_ring_entry_size) { + dev_dbg_ratelimited(node->adev->dev, "Interrupt ring overflow, dropping interrupt %d\n", count); return false; @@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) /* * Assumption: single reader/writer. This function is not re-entrant */ -static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) +static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry) { int count; - count = kfifo_out(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); + count = kfifo_out(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); - WARN_ON(count && count != kfd->device_info.ih_ring_entry_size); + WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size); - return count == kfd->device_info.ih_ring_entry_size; + return count == node->kfd->device_info.ih_ring_entry_size; } static void interrupt_wq(struct work_struct *work) { - struct kfd_dev *dev = container_of(work, struct kfd_dev, + struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work); uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; unsigned long start_jiffies = jiffies; - if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { + if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { dev_err_once(dev->adev->dev, "Ring entry too small\n"); return; } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) { - dev->device_info.event_interrupt_class->interrupt_wq(dev, + dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev, ih_ring_entry); if (time_is_before_jiffies(start_jiffies + HZ)) { /* If we spent more than a second processing signals, @@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work) } } -bool interrupt_is_wanted(struct kfd_dev *dev, +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag) { /* integer and bitwise OR so there is no boolean short-circuiting */ unsigned int wanted = 0; - wanted |= 
dev->device_info.event_interrupt_class->interrupt_isr(dev, + wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev, ih_ring_entry, patched_ihre, flag); return wanted != 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index ec1bf611624e..6eee9a0944f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -109,11 +109,11 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) */ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct kfd_process *p = pdd->process; int err; - if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND) + if (!dev->kfd->use_iommu_v2 || pdd->bound == PDD_BOUND) return 0; if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { @@ -146,7 +146,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p) /* Callback for process shutdown invoked by the IOMMU driver */ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) { - struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); + struct kfd_node *dev = kfd_device_by_pci_dev(pdev); struct kfd_process *p; struct kfd_process_device *pdd; @@ -182,7 +182,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, unsigned long address, u16 flags) { - struct kfd_dev *dev; + struct kfd_node *dev; dev_warn_ratelimited(kfd_device, "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", @@ -205,7 +205,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, * Bind processes do the device that have been temporarily unbound * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. */ -static int kfd_bind_processes_to_device(struct kfd_dev *kfd) +static int kfd_bind_processes_to_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -216,14 +216,14 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { mutex_unlock(&p->mutex); continue; } - err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid, + err = amd_iommu_bind_pasid(knode->adev->pdev, p->pasid, p->lead_thread); if (err < 0) { pr_err("Unexpected pasid 0x%x binding failure\n", @@ -246,7 +246,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) * processes will be restored to PDD_BOUND state in * kfd_bind_processes_to_device. 
*/ -static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) +static void kfd_unbind_processes_from_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -256,7 +256,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd)) { mutex_unlock(&p->mutex); @@ -281,7 +281,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd); + kfd_unbind_processes_from_device(kfd->node); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +312,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd); + err = kfd_bind_processes_to_device(kfd->node); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index bcf7bc3302c9..1bea629c49ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -38,7 +38,7 @@ /* Initialize a kernel queue, including allocations of GART memory * needed for the queue. */ -static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, +static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, enum kfd_queue_type type, unsigned int queue_size) { struct queue_properties prop; @@ -75,7 +75,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, if (!kq->mqd_mgr) return false; - prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); + prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off); if (!prop.doorbell_ptr) { pr_err("Failed to initialize doorbell"); @@ -112,7 +112,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size, + retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -189,7 +189,7 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); + kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr); err_get_kernel_doorbell: return false; @@ -220,7 +220,7 @@ static void kq_uninitialize(struct kernel_queue *kq, bool hanging) kfd_gtt_sa_free(kq->dev, kq->eop_mem); kfd_gtt_sa_free(kq->dev, kq->pq); - kfd_release_kernel_doorbell(kq->dev, + kfd_release_kernel_doorbell(kq->dev->kfd, kq->queue->properties.doorbell_ptr); uninit_queue(kq->queue); } @@ -298,7 +298,7 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); @@ -311,7 +311,7 @@ void kq_submit_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq) { - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { 
kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr = *kq->wptr_kernel % (kq->queue->properties.queue_size / 4); @@ -320,7 +320,7 @@ void kq_rollback_packet(struct kernel_queue *kq) } } -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type) { struct kernel_queue *kq; @@ -345,7 +345,7 @@ void kernel_queue_uninit(struct kernel_queue *kq, bool hanging) } /* FIXME: Can this test be removed? */ -static __attribute__((unused)) void test_kq(struct kfd_dev *dev) +static __attribute__((unused)) void test_kq(struct kfd_node *dev) { struct kernel_queue *kq; uint32_t *buffer, i; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 383202fd1ea2..9a6244430845 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -53,7 +53,7 @@ void kq_rollback_packet(struct kernel_queue *kq); struct kernel_queue { /* data */ - struct kfd_dev *dev; + struct kfd_node *dev; struct mqd_manager *mqd_mgr; struct queue *queue; uint64_t pending_wptr64; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 54933903bcb8..1e187677c90a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, prange->prefetch_loc, + 0, adev->kfd.dev->node->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, trigger); + 0, adev->kfd.dev->node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, prange->prefetch_loc, + adev->kfd.dev->node->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, trigger); + adev->kfd.dev->node->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 623ccd227b7d..61f6dd68c84b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -46,7 +46,7 @@ int pipe_priority_map[] = { KFD_PIPE_PRIORITY_CS_HIGH }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q) +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -61,7 +61,7 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_propertie return mqd_mem_obj; } -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj 
*allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -72,7 +72,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, return NULL; offset = (q->sdma_engine_id * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 57f900ccaa10..46fc3f273d0d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -68,7 +68,7 @@ */ extern int pipe_priority_map[]; struct mqd_manager { - struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd, + struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd, struct queue_properties *q); void (*init_mqd)(struct mqd_manager *mm, void **mqd, @@ -121,14 +121,14 @@ struct mqd_manager { uint32_t (*read_doorbell_id)(void *mqd); struct mutex mqd_mutex; - struct kfd_dev *dev; + struct kfd_node *dev; uint32_t mqd_size; }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q); -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q); void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 4889865c725c..03e04d5e5a11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -74,7 +74,7 @@ static void set_priority(struct cik_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -390,7 +390,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -470,7 +470,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d3e2b6a599a4..7a93be0ebb19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -74,7 +74,7 @@ static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -122,7 +122,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -210,7 +210,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << 
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -405,7 +405,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 5aa75f72caa1..dff171b54b5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -81,7 +81,7 @@ static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -91,12 +91,12 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * MES write to areas beyond MQD size. So allocate * 1 PAGE_SIZE memory for MQD is MES is enabled. */ - if (kfd->shared_resources.enable_mes) + if (node->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); - if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj)) + if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj)) return NULL; return mqd_mem_obj; @@ -113,7 +113,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); @@ -155,7 +155,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -243,7 +243,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -319,7 +319,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_sdma_mqd); @@ -387,7 +387,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -463,7 +463,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, * To allocate SDMA MQDs by generic functions * when MES is enabled. 
*/ - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { mqd->allocate_mqd = allocate_mqd; mqd->free_mqd = kfd_free_mqd_cp; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 51b53110341b..943a738e73f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -83,7 +83,7 @@ static void set_priority(struct v9_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { int retval; @@ -105,11 +105,11 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct * amdgpu memory functions to do so. */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { + if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, + retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -121,7 +121,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, return NULL; } } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), + retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd), &mqd_mem_obj); if (retval) return NULL; @@ -136,7 +136,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, { uint64_t addr; struct v9_mqd *m; - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; @@ -169,7 +168,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { /* On GC 9.4.3, DW 41 is re-purposed as * compute_tg_chunk_size. 
* TODO: review this setting when active CUs in the @@ -179,7 +178,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, } } else { /* PM4 queue */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { m->compute_static_thread_mgmt_se6 = 0; /* TODO: program pm4_target_xcc */ } @@ -190,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -225,7 +224,6 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo) { - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; struct v9_mqd *m; m = get_mqd(mqd); @@ -275,13 +273,13 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) m->cp_hqd_pq_control |= - CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -487,7 +485,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 530ba6f5b57e..f6b4a5686dcb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -77,7 +77,7 @@ static void set_priority(struct vi_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -136,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -227,7 +227,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; @@ -446,7 +446,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -528,7 +528,7 @@ struct 
mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index f612325241aa..2f54172e9175 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -45,7 +45,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int process_count, queue_count, compute_queue_count, gws_queue_count; unsigned int map_queue_size; unsigned int max_proc_per_quantum = 1; - struct kfd_dev *dev = pm->dqm->dev; + struct kfd_node *dev = pm->dqm->dev; process_count = pm->dqm->processes_count; queue_count = pm->dqm->active_queue_count; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 18250845a989..54d7d4665ad2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -119,7 +119,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; /* Determine the number of processes to map together to HW: * it can not exceed the number of VMIDs available to the @@ -220,7 +220,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: use_static = false; /* no static queues under SDMA */ - if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device)) + if (q->properties.sdma_engine_id < 2 && + !pm_use_ext_eng(q->device->kfd)) packet->bitfields2.engine_sel = q->properties.sdma_engine_id + engine_sel__mes_map_queues__sdma0_vi; else { @@ -263,7 +264,8 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, sizeof(struct pm4_mes_unmap_queues)); - packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ? + packet->bitfields2.extended_engine_sel = + pm_use_ext_eng(pm->dqm->dev->kfd) ? 
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel : extended_engine_sel__mes_unmap_queues__legacy_engine_sel; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 4f951eaa6ee8..faf4772ed317 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -77,7 +77,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, { struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; if (WARN_ON(!ib)) return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 94a438956868..fdb97e5d0c01 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -210,11 +210,13 @@ enum cache_policy { ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))) +struct kfd_node; + struct kfd_event_interrupt_class { - bool (*interrupt_isr)(struct kfd_dev *dev, + bool (*interrupt_isr)(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag); - void (*interrupt_wq)(struct kfd_dev *dev, + void (*interrupt_wq)(struct kfd_node *dev, const uint32_t *ih_ring_entry); }; @@ -236,8 +238,8 @@ struct kfd_device_info { uint64_t reserved_sdma_queues_bitmap; }; -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev); struct kfd_mem_obj { uint32_t range_start; @@ -253,13 +255,59 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +struct kfd_dev; + +struct kfd_node { + struct amdgpu_device *adev; /* Duplicated here along with keeping + * a copy in kfd_dev to save a hop + */ + const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with + * keeping a copy in kfd_dev to + * save a hop + */ + struct kfd_vmid_info vm_info; + unsigned int id; /* topology stub index */ + /* Interrupts */ + struct kfifo ih_fifo; + struct workqueue_struct *ih_wq; + struct work_struct interrupt_work; + spinlock_t interrupt_lock; + + /* + * Interrupts of interest to KFD are copied + * from the HW ring into a SW ring. + */ + bool interrupts_active; + + /* QCM Device instance */ + struct device_queue_manager *dqm; + + /* Global GWS resource shared between processes */ + void *gws; + bool gws_debug_workaround; + + /* Clients watching SMI events */ + struct list_head smi_clients; + spinlock_t smi_lock; + uint32_t reset_seq_num; + + /* SRAM ECC flag */ + atomic_t sram_ecc_flag; + + /*spm process id */ + unsigned int spm_pasid; + + /* Maximum process number mapped to HW scheduler */ + unsigned int max_proc_per_quantum; + + struct kfd_dev *kfd; +}; + struct kfd_dev { struct amdgpu_device *adev; struct kfd_device_info device_info; - unsigned int id; /* topology stub index */ - phys_addr_t doorbell_base; /* Start of actual doorbells used by * KFD. 
It is aligned for mapping * into user mode @@ -274,7 +322,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_vmid_info vm_info; struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; @@ -290,30 +337,13 @@ struct kfd_dev { unsigned int gtt_sa_chunk_size; unsigned int gtt_sa_num_of_chunks; - /* Interrupts */ - struct kfifo ih_fifo; - struct workqueue_struct *ih_wq; - struct work_struct interrupt_work; - spinlock_t interrupt_lock; - - /* QCM Device instance */ - struct device_queue_manager *dqm; - bool init_complete; - /* - * Interrupts of interest to KFD are copied - * from the HW ring into a SW ring. - */ - bool interrupts_active; /* Firmware versions */ uint16_t mec_fw_version; uint16_t mec2_fw_version; uint16_t sdma_fw_version; - /* Maximum process number mapped to HW scheduler */ - unsigned int max_proc_per_quantum; - /* CWSR */ bool cwsr_enabled; const void *cwsr_isa; @@ -327,21 +357,9 @@ struct kfd_dev { /* Use IOMMU v2 flag */ bool use_iommu_v2; - /* SRAM ECC flag */ - atomic_t sram_ecc_flag; - /* Compute Profile ref. count */ atomic_t compute_profile; - /* Global GWS resource shared between processes */ - void *gws; - - /* Clients watching SMI events */ - struct list_head smi_clients; - spinlock_t smi_lock; - - uint32_t reset_seq_num; - struct ida doorbell_ida; unsigned int max_doorbell_slices; @@ -349,6 +367,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; + + struct kfd_node *node; }; enum kfd_mempool { @@ -563,7 +583,7 @@ struct queue { unsigned int doorbell_id; struct kfd_process *process; - struct kfd_dev *device; + struct kfd_node *device; void *gws; /* procfs */ @@ -697,7 +717,7 @@ enum kfd_pdd_bound { /* Data that is per-process-per device. */ struct kfd_process_device { /* The device that owns this data. */ - struct kfd_dev *dev; + struct kfd_node *dev; /* The process that owns this kfd_process_device. 
*/ struct kfd_process *process; @@ -925,7 +945,7 @@ struct amdkfd_ioctl_desc { unsigned int cmd_drv; const char *name; }; -bool kfd_dev_is_large_bar(struct kfd_dev *dev); +bool kfd_dev_is_large_bar(struct kfd_node *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); @@ -961,16 +981,16 @@ int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id); int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct file *drm_file); -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p); bool kfd_process_xnack_mode(struct kfd_process *p, bool supported); -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); /* KFD process API for creating and translating handles */ @@ -994,7 +1014,7 @@ void kfd_pasid_free(u32 pasid); size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off); @@ -1012,10 +1032,10 @@ void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index); /* GTT Sub-Allocator */ -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj); -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj); +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj); extern struct device *kfd_device; @@ -1028,25 +1048,25 @@ void kfd_procfs_del_queue(struct queue *q); /* Topology */ int kfd_topology_init(void); void kfd_topology_shutdown(void); -int kfd_topology_add_device(struct kfd_dev *gpu); -int kfd_topology_remove_device(struct kfd_dev *gpu); +int kfd_topology_add_device(struct kfd_node *gpu); +int kfd_topology_remove_device(struct kfd_node *gpu); struct kfd_topology_device *kfd_topology_device_by_proximity_domain( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); -int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); +struct kfd_node *kfd_device_by_id(uint32_t gpu_id); +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ 
-int kfd_interrupt_init(struct kfd_dev *dev); -void kfd_interrupt_exit(struct kfd_dev *dev); -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); -bool interrupt_is_wanted(struct kfd_dev *dev, +int kfd_interrupt_init(struct kfd_node *dev); +void kfd_interrupt_exit(struct kfd_node *dev); +bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); @@ -1174,22 +1194,22 @@ void print_queue_properties(struct queue_properties *q); void print_queue(struct queue *q); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); + struct kfd_node *dev); +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type); void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid); @@ -1206,7 +1226,7 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p); int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -1323,7 +1343,7 @@ int kfd_wait_on_events(struct kfd_process *p, uint32_t *wait_result); void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits); -void kfd_signal_iommu_event(struct kfd_dev *dev, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested); void kfd_signal_hw_exception_event(u32 pasid); @@ -1339,12 +1359,12 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, int kfd_get_num_events(struct kfd_process *p); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, struct kfd_vm_fault_info *info); -void kfd_signal_reset_event(struct kfd_dev *dev); +void kfd_signal_reset_event(struct kfd_node *dev); -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid); +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid); void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); @@ -1359,12 +1379,12 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) bool kfd_is_locked(void); /* 
Compute profile */ -void kfd_inc_compute_active(struct kfd_dev *dev); -void kfd_dec_compute_active(struct kfd_dev *dev); +void kfd_inc_compute_active(struct kfd_node *dev); +void kfd_dec_compute_active(struct kfd_node *dev); /* Cgroup Support */ /* Check with device cgroup if @kfd device is accessible */ -static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) { #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = adev_to_drm(kfd->adev); @@ -1389,7 +1409,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data); int kfd_debugfs_rls_by_device(struct seq_file *m, void *data); int pm_debugfs_runlist(struct seq_file *m, void *data); -int kfd_debugfs_hang_hws(struct kfd_dev *dev); +int kfd_debugfs_hang_hws(struct kfd_node *dev); int pm_debugfs_hang_hws(struct packet_manager *pm); int dqm_debugfs_hang_hws(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 07a9eaf9b7d8..66e021889c64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -269,7 +269,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) int cu_cnt; int wave_cnt; int max_waves_per_cu; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; @@ -691,7 +691,7 @@ void kfd_process_destroy_wq(void) static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_process_device *pdd, void **kptr) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (kptr && *kptr) { amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); @@ -713,7 +713,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, uint64_t gpu_va, uint32_t size, uint32_t flags, struct kgd_mem **mem, void **kptr) { - struct kfd_dev *kdev = pdd->dev; + struct kfd_node *kdev = pdd->dev; int err; err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, @@ -982,7 +982,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) static void kfd_process_kunmap_signal_bo(struct kfd_process *p) { struct kfd_process_device *pdd; - struct kfd_dev *kdev; + struct kfd_node *kdev; void *mem; kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); @@ -1040,9 +1040,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); - kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); + kfd_free_process_doorbells(pdd->dev->kfd, pdd->doorbell_index); - if (pdd->dev->shared_resources.enable_mes) + if (pdd->dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, pdd->proc_ctx_bo); /* @@ -1259,10 +1259,10 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) int i; for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; struct qcm_process_device *qpd = &p->pdds[i]->qpd; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); @@ -1279,7 +1279,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) return err; } - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, 
dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1291,7 +1291,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE @@ -1300,7 +1300,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) void *kaddr; int ret; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) return 0; /* cwsr_base is only set for dGPU */ @@ -1313,7 +1313,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) qpd->cwsr_kaddr = kaddr; qpd->tba_addr = qpd->cwsr_base; - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1324,10 +1324,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) return; kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr); @@ -1371,7 +1371,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support retry. */ for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; /* Only consider GFXv9 and higher GPUs. 
Older GPUs don't * support the SVM APIs and don't need to be considered @@ -1394,7 +1394,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; - if (dev->noretry) + if (dev->kfd->noretry) return false; } @@ -1528,7 +1528,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return 0; } -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p) { int i; @@ -1540,7 +1540,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, return NULL; } -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd = NULL; @@ -1552,7 +1552,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, if (!pdd) return NULL; - if (init_doorbell_bitmap(&pdd->qpd, dev)) { + if (init_doorbell_bitmap(&pdd->qpd, dev->kfd)) { pr_err("Failed to init doorbell for process\n"); goto err_free_pdd; } @@ -1573,7 +1573,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_PROC_CTX_SIZE, &pdd->proc_ctx_bo, @@ -1619,7 +1619,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; struct kfd_process *p; - struct kfd_dev *dev; + struct kfd_node *dev; int ret; if (!drm_file) @@ -1679,7 +1679,7 @@ err_reserve_ib_mem: * * Assumes that the process lock is held. 
*/ -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd; @@ -1811,7 +1811,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, + kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid, trigger); r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, @@ -1839,7 +1839,7 @@ fail: if (n_evicted == 0) break; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd)) @@ -1860,7 +1860,7 @@ int kfd_process_restore_queues(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd); @@ -2016,7 +2016,7 @@ int kfd_resume_all_processes(void) return ret; } -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { struct kfd_process_device *pdd; @@ -2051,7 +2051,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) { struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; /* * It can be that we race and lose here, but that is extremely unlikely diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 4236539d9f93..5602498e713f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -81,7 +81,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (pdd->already_dequeued) return; @@ -93,7 +93,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; struct kgd_mem *mem = NULL; @@ -178,7 +178,7 @@ void pqm_uninit(struct process_queue_manager *pqm) } static int init_user_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, struct queue **q, + struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, struct file *f, struct amdgpu_bo *wptr_bo, unsigned int qid) @@ -199,7 +199,7 @@ static int init_user_queue(struct process_queue_manager *pqm, (*q)->device = dev; (*q)->process = pqm->process; - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_GANG_CTX_SIZE, &(*q)->gang_ctx_bo, @@ -224,7 +224,7 @@ cleanup: } int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -258,7 +258,7 
@@ int pqm_create_queue(struct process_queue_manager *pqm, * Hence we also check the type as well */ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ)) - max_queues = dev->device_info.max_no_of_hqd/2; + max_queues = dev->kfd->device_info.max_no_of_hqd/2; if (pdd->qpd.queue_count >= max_queues) return -ENOSPC; @@ -354,7 +354,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, */ *p_doorbell_offset_in_process = (q->properties.doorbell_off * sizeof(uint32_t)) & - (kfd_doorbell_process_slice(dev) - 1); + (kfd_doorbell_process_slice(dev->kfd) - 1); pr_debug("PQM After DQM create queue\n"); @@ -387,7 +387,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) struct process_queue_node *pqn; struct kfd_process_device *pdd; struct device_queue_manager *dqm; - struct kfd_dev *dev; + struct kfd_node *dev; int retval; dqm = NULL; @@ -439,7 +439,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pdd->qpd.num_gws = 0; } - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); if (pqn->q->wptr_bo) @@ -859,7 +859,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 0472b56de245..a0bf6558f4ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -36,7 +36,7 @@ struct kfd_smi_client { wait_queue_head_t wait_queue; /* events enabled */ uint64_t events; - struct kfd_dev *dev; + struct kfd_node *dev; spinlock_t lock; struct rcu_head rcu; pid_t pid; @@ -149,7 +149,7 @@ static void kfd_smi_ev_client_free(struct rcu_head *p) static int kfd_smi_ev_release(struct inode *inode, struct file *filep) { struct kfd_smi_client *client = filep->private_data; - struct kfd_dev *dev = client->dev; + struct kfd_node *dev = client->dev; spin_lock(&dev->smi_lock); list_del_rcu(&client->list); @@ -171,7 +171,7 @@ static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client, return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event); } -static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, +static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev, unsigned int smi_event, char *event_msg, int len) { struct kfd_smi_client *client; @@ -196,7 +196,7 @@ static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, } __printf(4, 5) -static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, +static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev, unsigned int event, char *fmt, ...) 
{ char fifo_in[KFD_SMI_EVENT_MSG_SIZE]; @@ -215,7 +215,7 @@ static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, add_event_to_kfifo(pid, dev, event, fifo_in, len); } -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset) { unsigned int event; @@ -228,7 +228,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num); } -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask) { kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n", @@ -236,7 +236,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); } -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) { struct amdgpu_task_info task_info; @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->id, write_fault ? 'W' : 'R'); + address, dev->node->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->id, migration ? 'M' : 'U'); + pid, address, dev->node->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->id, trigger); + dev->node->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->id); + dev->node->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,12 +328,12 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->id, trigger); + pid, address, last - address + 1, dev->node->id, trigger); } -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) { struct kfd_smi_client *client; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 76fe4e0ec2d2..59cd089f80d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -24,11 +24,11 @@ #ifndef KFD_SMI_EVENTS_H_INCLUDED #define KFD_SMI_EVENTS_H_INCLUDED -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid); -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd); +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid); +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask); -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset); +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset); void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c02430537e9c..96ccff79902c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1266,7 +1266,7 @@ svm_range_unmap_from_gpus(struct 
svm_range *prange, unsigned long start, return -EINVAL; } - kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, + kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid, start, last, trigger); r = svm_range_unmap_from_gpu(pdd->dev->adev, @@ -3083,7 +3083,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 8e4124dcb6e4..06a11186d947 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -96,7 +96,7 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) return ret; } -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +struct kfd_node *kfd_device_by_id(uint32_t gpu_id) { struct kfd_topology_device *top_dev; @@ -107,10 +107,10 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) return top_dev->gpu; } -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -125,10 +125,10 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -526,7 +526,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (dev->gpu) { log_max_watch_addr = - __ilog2_u32(dev->gpu->device_info.num_of_watch_points); + __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points); if (log_max_watch_addr) { dev->node_props.capability |= @@ -548,11 +548,11 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL); sysfs_show_32bit_prop(buffer, offs, "fw_version", - dev->gpu->mec_fw_version); + dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", - dev->gpu->sdma_fw_version); + dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); @@ -1157,7 +1157,7 @@ void kfd_topology_shutdown(void) up_write(&topology_lock); } -static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) +static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; uint32_t buf[7]; @@ -1167,8 +1167,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - local_mem_size = gpu->local_mem_info.local_mem_size_private + - gpu->local_mem_info.local_mem_size_public; + local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + + gpu->kfd->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ -1188,7 +1188,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * list then return NULL. This means a new topology device has to * be created for this GPU. 
*/ -static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) +static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; @@ -1201,7 +1201,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) /* Discrete GPUs need their own topology device list * entries. Don't assign them to CPU/APU nodes. */ - if (!gpu->use_iommu_v2 && + if (!gpu->kfd->use_iommu_v2 && dev->node_props.cpu_cores_count) continue; @@ -1275,7 +1275,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; /* set gpu (dev) flags. */ } else { - if (!dev->gpu->pci_atomic_requested || + if (!dev->gpu->kfd->pci_atomic_requested || dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; @@ -1569,8 +1569,8 @@ static int kfd_dev_create_p2p_links(void) if (dev == new_dev) break; if (!dev->gpu || !dev->gpu->adev || - (dev->gpu->hive_id && - dev->gpu->hive_id == new_dev->gpu->hive_id)) + (dev->gpu->kfd->hive_id && + dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id)) goto next; /* check if node(s) is/are peer accessible in one direction or bi-direction */ @@ -1590,7 +1590,6 @@ out: return ret; } - /* Helper function. See kfd_fill_gpu_cache_info for parameter description */ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext, struct kfd_gpu_cache_info *pcache_info, @@ -1723,7 +1722,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, /* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info * tables */ -static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev) +static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) { struct kfd_gpu_cache_info *pcache_info = NULL; int i, j, k; @@ -1805,7 +1804,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct pr_debug("Added [%d] GPU cache entries\n", num_of_entries); } -static int kfd_topology_add_device_locked(struct kfd_dev *gpu, uint32_t gpu_id, +static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id, struct kfd_topology_device **dev) { int proximity_domain = ++topology_crat_proximity_domain; @@ -1865,7 +1864,7 @@ err: return res; } -int kfd_topology_add_device(struct kfd_dev *gpu) +int kfd_topology_add_device(struct kfd_node *gpu) { uint32_t gpu_id; struct kfd_topology_device *dev; @@ -1916,7 +1915,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.simd_arrays_per_engine = cu_info.num_shader_arrays_per_engine; - dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; + dev->node_props.gfx_target_version = + gpu->kfd->device_info.gfx_target_version; dev->node_props.vendor_id = gpu->adev->pdev->vendor; dev->node_props.device_id = gpu->adev->pdev->device; dev->node_props.capability |= @@ -1929,15 +1929,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = - gpu->shared_resources.drm_render_minor; + gpu->kfd->shared_resources.drm_render_minor; - dev->node_props.hive_id = gpu->hive_id; + dev->node_props.hive_id = gpu->kfd->hive_id; dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = kfd_get_num_xgmi_sdma_engines(gpu); 
dev->node_props.num_sdma_queues_per_engine = - gpu->device_info.num_sdma_queues_per_engine - - gpu->device_info.num_reserved_sdma_queues_per_engine; + gpu->kfd->device_info.num_sdma_queues_per_engine - + gpu->kfd->device_info.num_reserved_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? dev->gpu->adev->gds.gws_size : 0; @@ -1979,7 +1979,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * Overwrite ATS capability according to needs_iommu_device to fix * potential missing corresponding bit in CRAT of BIOS. */ - if (dev->gpu->use_iommu_v2) + if (dev->gpu->kfd->use_iommu_v2) dev->node_props.capability |= HSA_CAP_ATS_PRESENT; else dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; @@ -2079,7 +2079,7 @@ static void kfd_topology_update_io_links(int proximity_domain) } } -int kfd_topology_remove_device(struct kfd_dev *gpu) +int kfd_topology_remove_device(struct kfd_node *gpu) { struct kfd_topology_device *dev, *tmp; uint32_t gpu_id; @@ -2119,7 +2119,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu) * Return - 0: On success (@kdev will be NULL for non GPU nodes) * -1: If end of list */ -int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) +int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev) { struct kfd_topology_device *top_dev; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index fca30d00a9bb..3b8afb6aba79 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -75,7 +75,7 @@ struct kfd_mem_properties { uint32_t flags; uint32_t width; uint32_t mem_clk_max; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; }; @@ -93,7 +93,7 @@ struct kfd_cache_properties { uint32_t cache_latency; uint32_t cache_type; uint8_t sibling_map[CACHE_SIBLINGMAP_SIZE]; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; uint32_t sibling_map_size; @@ -113,7 +113,7 @@ struct kfd_iolink_properties { uint32_t max_bandwidth; uint32_t rec_transfer_size; uint32_t flags; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; }; @@ -135,7 +135,7 @@ struct kfd_topology_device { struct list_head io_link_props; struct list_head p2p_link_props; struct list_head perf_props; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj_node; struct kobject *kobj_mem; struct kobject *kobj_cache; -- cgit v1.2.3 From ef75a6ef37235e211bbdb17c25e5f79c55df1750 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Thu, 3 Mar 2022 10:56:05 -0500 Subject: drm/amdkfd: Update coherence settings for svm ranges Recently introduced commit "drm/amdgpu: Set cache coherency for GC 9.4.3" did not update the settings applicable for svm ranges. Add the coherence settings for svm ranges for GFX IP 9.4.3. 
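For orientation, the GFX IP 9.4.3 case added to svm_range_get_pte_flags() in this patch boils down to the following selection (a simplified, illustrative sketch of the hunk below, not a separate implementation; multi-partition handling is still a TODO at this point):

	snoop = true;
	if (flags & KFD_IOCTL_SVM_FLAG_UNCACHED)
		mapping_flags |= AMDGPU_VM_MTYPE_UC;	/* explicitly uncached */
	else if (bo_adev == adev)
		mapping_flags |= AMDGPU_VM_MTYPE_RW;	/* local HBM region close to the partition */
	else
		mapping_flags |= AMDGPU_VM_MTYPE_NC;	/* HBM far from partition, remote XGMI GPU,
							 * or system memory */

The new KFD_IOCTL_SVM_FLAG_UNCACHED uapi flag (0x00000080), also added below, is what drives the uncached path.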
Reviewed-by: Amber Lin Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17 +++++++++++++++++ include/uapi/linux/kfd_ioctl.h | 2 ++ 2 files changed, 19 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 96ccff79902c..4b4f3bf8b823 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1159,6 +1159,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, uint64_t pte_flags; bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; + bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; if (domain == SVM_RANGE_VRAM_DOMAIN) bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); @@ -1198,6 +1199,22 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } break; + case IP_VERSION(9, 4, 3): + //TODO: Need more work for handling multiple memory partitions + //e.g. NPS4. Current approch is only applicable without memory + //partitions. + snoop = true; + if (uncached) + mapping_flags |= AMDGPU_VM_MTYPE_UC; + /* local HBM region close to partition*/ + else if (bo_adev == adev) + mapping_flags |= AMDGPU_VM_MTYPE_RW; + /* local HBM region far from partition or remote XGMI GPU or + * system memory + */ + else + mapping_flags |= AMDGPU_VM_MTYPE_NC; + break; default: mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2da5c3ad71bd..2a9671e1ddb5 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -623,6 +623,8 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disable */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 +/* Uncached access to memory */ +#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations -- cgit v1.2.3 From 5fb34bd9cf9e248d7e84e431a4a6b731334ab564 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Tue, 24 May 2022 10:22:12 -0500 Subject: drm/amdkfd: pass kfd_node ref to svm migration api This work is required for GC 9.4.3, previous to support memory partitions per node at SVM. When multiple partition is configured, every BO should be allocated inside one specific partition which corresponds to the current amdgpu_device and kfd_node. 
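In practice the SVM and migration entry points now take a kfd_node reference instead of an amdgpu_device, with the device still reachable as node->adev where the actual BO allocation happens. A few of the new signatures, copied from the diff below for orientation (not a complete list):

	int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear);
	struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id);
	struct kfd_process_device *svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
	int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
				    uint32_t client_id, uint32_t node_id, uint64_t addr,
				    bool write_fault);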
v2: squash in compilation fix (Alex) v3: squash in fix for pre-gfx 9.4.3 (Alex) v4: squash in best_loc fix (Alex) Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 33 ++++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 45 ++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 30 +++++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 152 +++++++++++++++---------------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 21 +++-- 9 files changed, 166 insertions(+), 133 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c3964c14f215..c390b2856cc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2441,7 +2441,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. */ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault) + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault) { bool is_compute_context = false; struct amdgpu_bo *root; @@ -2465,8 +2466,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && - !svm_range_restore_pages(adev, pasid, addr, write_fault)) { + if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9f5d32b0fda1..dbab31647186 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault); + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d76f5c8d4977..01bd45651382 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -139,7 +139,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2c322a25bf1c..c5752a349f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,11 +557,24 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id; + uint32_t node_id = 0; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (entry->client_id == SOC15_IH_CLIENTID_VMC) { + hub_name = "mmhub0"; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { + hub_name = "mmhub1"; + hub = 
&adev->vmhub[AMDGPU_MMHUB1(0)]; + } else { + hub_name = "gfxhub0"; + node_id = (adev->ip_versions[GC_HWIP][0] == + IP_VERSION(9, 4, 3)) ? entry->node_id : 0; + hub = &adev->vmhub[node_id/2]; + } + if (retry_fault) { if (adev->irq.retry_cam_enabled) { /* Delegate it to a different ring if the hardware hasn't @@ -574,7 +587,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault); + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) return 1; @@ -596,7 +610,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault)) return 1; } } @@ -604,18 +619,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (!printk_ratelimit()) return 0; - if (entry->client_id == SOC15_IH_CLIENTID_VMC) { - hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { - hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; - } else { - hub_name = "gfxhub0"; - node_id = (adev->ip_versions[GC_HWIP][0] == - IP_VERSION(9, 4, 3)) ? entry->node_id : 0; - hub = &adev->vmhub[node_id/2]; - } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 5f4dc2a45bd0..e7e5abc32c84 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -287,11 +287,12 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) } static int -svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch, uint64_t ttm_res_offset) { - uint64_t npages = migrate->npages; + uint64_t npages = migrate->cpages; + struct amdgpu_device *adev = node->adev; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -321,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); if (r) { - dev_err(adev->dev, "%s: fail %d dma_map_page\n", + dev_err(dev, "%s: fail %d dma_map_page\n", __func__, r); goto out_free_vram_pages; } @@ -390,12 +391,13 @@ out_free_vram_pages: } static long -svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); uint64_t npages = (end - start) >> PAGE_SHIFT; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -445,7 +447,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, else pr_debug("0x%lx pages migrated\n", cpages); - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, 
scratch, ttm_res_offset); + r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", @@ -465,7 +467,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); @@ -492,8 +494,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, { unsigned long addr, start, end; struct vm_area_struct *vma; - struct amdgpu_device *adev; uint64_t ttm_res_offset; + struct kfd_node *node; unsigned long cpages = 0; long r = 0; @@ -503,9 +505,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, return 0; } - adev = svm_range_get_adev_by_id(prange, best_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", best_loc); + node = svm_range_get_node_by_id(prange, best_loc); + if (!node) { + pr_debug("failed to get kfd node by id 0x%x\n", best_loc); return -ENODEV; } @@ -515,9 +517,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; - r = svm_range_vram_node_new(adev, prange, true); + r = svm_range_vram_node_new(node, prange, true); if (r) { - dev_dbg(adev->dev, "fail %ld to alloc vram\n", r); + dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); return r; } ttm_res_offset = prange->offset << PAGE_SHIFT; @@ -530,7 +532,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, break; next = min(vma->vm_end, end); - r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset); + r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset); if (r < 0) { pr_debug("failed %ld to migrate\n", r); break; @@ -663,7 +665,7 @@ out_oom: * positive values - partial migration, number of pages not migrated */ static long -svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, struct page *fault_page) { @@ -671,6 +673,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -745,7 +748,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); } @@ -766,7 +769,7 @@ out: int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, uint32_t trigger, struct page *fault_page) { - struct amdgpu_device *adev; + struct kfd_node *node; struct vm_area_struct *vma; unsigned long addr; unsigned long start; @@ -780,13 +783,11 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, return 0; } - adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", - prange->actual_loc); + node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (!node) { + pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); return -ENODEV; } - pr_debug("svms 0x%p prange 0x%p [0x%lx 
0x%lx] from gpu 0x%x to ram\n", prange->svms, prange, prange->start, prange->last, prange->actual_loc); @@ -805,7 +806,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, } next = min(vma->vm_end, end); - r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger, + r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger, fault_page); if (r < 0) { pr_debug("failed %ld to migrate prange %p\n", r, prange); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 400b4dcbdf05..df372de6b056 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -289,6 +289,7 @@ struct kfd_node { * from the HW ring into a SW ring. */ bool interrupts_active; + uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */ /* QCM Device instance */ struct device_queue_manager *dqm; @@ -971,9 +972,8 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_adev(struct kfd_process *p, - struct amdgpu_device *adev, uint32_t *gpuid, - uint32_t *gpuidx); +int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, uint32_t gpuidx, uint32_t *gpuid) { return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL; @@ -1073,6 +1073,30 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, + uint32_t node_id) +{ + if ((node->interrupt_bitmap & (0x1U << node_id)) || + ((node_id % 4) == 0 && + (node->interrupt_bitmap >> 16) & (0x1U << client_id))) + return true; + + return false; +} +static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, + uint32_t client_id, uint32_t node_id) { + struct kfd_dev *dev = adev->kfd.dev; + uint32_t i; + + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + return dev->nodes[0]; + + for (i = 0; i < dev->num_nodes; i++) + if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + return dev->nodes[i]; + + return NULL; +} int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index c3d43e6e5236..666815b227a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1891,13 +1891,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, - uint32_t *gpuid, uint32_t *gpuidx) +kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx) { int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { + if (p->pdds[i] && p->pdds[i]->dev == node) { *gpuid = p->pdds[i]->user_gpu_id; *gpuidx = i; return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 
4b4f3bf8b823..639831fbb6ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -170,8 +170,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, page = hmm_pfn_to_page(hmm_pfns[i]); if (is_zone_device_page(page)) { - struct amdgpu_device *bo_adev = - amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - @@ -424,10 +423,8 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo) } static bool -svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) +svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange) { - struct amdgpu_device *bo_adev; - mutex_lock(&prange->lock); if (!prange->svm_bo) { mutex_unlock(&prange->lock); @@ -440,12 +437,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) } if (svm_bo_ref_unless_zero(prange->svm_bo)) { /* - * Migrate from GPU to GPU, remove range from source bo_adev - * svm_bo range list, and return false to allocate svm_bo from - * destination adev. + * Migrate from GPU to GPU, remove range from source svm_bo->node + * range list, and return false to allocate svm_bo from destination + * node. */ - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - if (bo_adev != adev) { + if (prange->svm_bo->node != node) { mutex_unlock(&prange->lock); spin_lock(&prange->svm_bo->list_lock); @@ -513,7 +509,7 @@ static struct svm_range_bo *svm_range_bo_new(void) } int -svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, +svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { struct amdgpu_bo_param bp; @@ -528,7 +524,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, prange->start, prange->last); - if (svm_range_validate_svm_bo(adev, prange)) + if (svm_range_validate_svm_bo(node, prange)) return 0; svm_bo = svm_range_bo_new(); @@ -542,6 +538,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, kfree(svm_bo); return -ESRCH; } + svm_bo->node = node; svm_bo->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), mm, @@ -559,7 +556,10 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; - r = amdgpu_bo_create_user(adev, &bp, &ubo); + /* TODO: Allocate memory from the right memory partition. 
We can sort + * out the details later, once basic memory partitioning is working + */ + r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { pr_debug("failed %d to create bo\n", r); goto create_bo_failed; @@ -617,45 +617,30 @@ void svm_range_vram_node_free(struct svm_range *prange) prange->ttm_res = NULL; } -struct amdgpu_device * -svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) +struct kfd_node * +svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id) { - struct kfd_process_device *pdd; struct kfd_process *p; - int32_t gpu_idx; + struct kfd_process_device *pdd; p = container_of(prange->svms, struct kfd_process, svms); - - gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id); - if (gpu_idx < 0) { - pr_debug("failed to get device by id 0x%x\n", gpu_id); - return NULL; - } - pdd = kfd_process_device_from_gpuidx(p, gpu_idx); + pdd = kfd_process_device_data_by_id(p, gpu_id); if (!pdd) { - pr_debug("failed to get device by idx 0x%x\n", gpu_idx); + pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id); return NULL; } - return pdd->dev->adev; + return pdd->dev; } struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node) { struct kfd_process *p; - int32_t gpu_idx, gpuid; - int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); - if (r) { - pr_debug("failed to get device id by adev %p\n", adev); - return NULL; - } - - return kfd_process_device_from_gpuidx(p, gpu_idx); + return kfd_get_process_device_data(node, p); } static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) @@ -1148,12 +1133,18 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, } return 0; } +static bool +svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) +{ + return (node_a->adev == node_b->adev || + amdgpu_xgmi_same_hive(node_a->adev, node_b->adev)); +} static uint64_t -svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, - int domain) +svm_range_get_pte_flags(struct kfd_node *node, + struct svm_range *prange, int domain) { - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; uint32_t flags = prange->flags; uint32_t mapping_flags = 0; uint64_t pte_flags; @@ -1162,18 +1153,18 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; if (domain == SVM_RANGE_VRAM_DOMAIN) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + bo_node = prange->svm_bo->node; - switch (KFD_GC_VERSION(adev->kfd.dev)) { + switch (node->adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1183,15 +1174,15 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, break; case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? 
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; - if (adev->gmc.xgmi.connected_to_cpu) + if (node->adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1207,7 +1198,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (uncached) mapping_flags |= AMDGPU_VM_MTYPE_UC; /* local HBM region close to partition*/ - else if (bo_adev == adev) + else if (bo_node == node) mapping_flags |= AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU or * system memory @@ -1231,7 +1222,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM; pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; - pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); + pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags); return pte_flags; } @@ -1338,7 +1329,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", last_start, prange->start + i, last_domain ? "GPU" : "CPU"); - pte_flags = svm_range_get_pte_flags(adev, prange, last_domain); + pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); if (readonly) pte_flags &= ~AMDGPU_PTE_WRITEABLE; @@ -1347,6 +1338,9 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags); + /* TODO: we still need to determine the vm_manager.vram_base_offset based on + * the memory partition. + */ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, @@ -1384,16 +1378,14 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, unsigned long *bitmap, bool wait, bool flush_tlb) { struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev = NULL; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; int r = 0; if (prange->svm_bo && prange->ttm_res) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - else - bo_adev = NULL; + bo_adev = prange->svm_bo->node->adev; p = container_of(prange->svms, struct kfd_process, svms); for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { @@ -2526,17 +2518,17 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, */ static int32_t svm_range_best_restore_location(struct svm_range *prange, - struct amdgpu_device *adev, + struct kfd_node *node, int32_t *gpuidx) { - struct amdgpu_device *bo_adev, *preferred_adev; + struct kfd_node *bo_node, *preferred_node; struct kfd_process *p; uint32_t gpuid; int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; @@ -2546,9 +2538,8 @@ svm_range_best_restore_location(struct svm_range *prange, prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { - preferred_adev = svm_range_get_adev_by_id(prange, - prange->preferred_loc); - if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc); + if (preferred_node && 
svm_nodes_in_same_hive(node, preferred_node)) return prange->preferred_loc; /* fall through */ } @@ -2560,8 +2551,8 @@ svm_range_best_restore_location(struct svm_range *prange, if (!prange->actual_loc) return 0; - bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + bo_node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (bo_node && svm_nodes_in_same_hive(node, bo_node)) return prange->actual_loc; else return 0; @@ -2678,7 +2669,7 @@ svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, } static struct -svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, +svm_range *svm_range_create_unregistered_range(struct kfd_node *node, struct kfd_process *p, struct mm_struct *mm, int64_t addr) @@ -2713,7 +2704,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange, true); return NULL; @@ -2767,7 +2758,7 @@ static bool svm_range_skip_recover(struct svm_range *prange) } static void -svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, +svm_range_count_fault(struct kfd_node *node, struct kfd_process *p, int32_t gpuidx) { struct kfd_process_device *pdd; @@ -2780,7 +2771,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx); if (r < 0) return; } @@ -2808,6 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2815,6 +2807,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, struct svm_range *prange; struct kfd_process *p; ktime_t timestamp = ktime_get_boottime(); + struct kfd_node *node; int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; @@ -2858,6 +2851,13 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } + node = kfd_node_by_irq_ids(adev, node_id, client_id); + if (!node) { + pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, + client_id); + r = -EFAULT; + goto out; + } mmap_read_lock(mm); retry_write_locked: mutex_lock(&svms->lock); @@ -2876,7 +2876,7 @@ retry_write_locked: write_locked = true; goto retry_write_locked; } - prange = svm_range_create_unregistered_range(adev, p, mm, addr); + prange = svm_range_create_unregistered_range(node, p, mm, addr); if (!prange) { pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", svms, addr); @@ -2891,7 +2891,7 @@ retry_write_locked: mutex_lock(&prange->migrate_mutex); if (svm_range_skip_recover(prange)) { - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; goto out_unlock_range; } @@ -2922,7 +2922,7 @@ retry_write_locked: goto out_unlock_range; } - best_loc = svm_range_best_restore_location(prange, adev, &gpuidx); + best_loc = svm_range_best_restore_location(prange, node, &gpuidx); if (best_loc == -1) { pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 
svms, prange->start, prange->last); @@ -2981,7 +2981,7 @@ out_unlock_svms: mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, gpuidx); + svm_range_count_fault(node, p, gpuidx); mmput(mm); out: @@ -2989,7 +2989,7 @@ out: if (r == -EAGAIN) { pr_debug("recover vm fault later\n"); - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; } return r; @@ -3231,7 +3231,7 @@ svm_range_best_prefetch_location(struct svm_range *prange) DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; struct kfd_process *p; uint32_t gpuidx; @@ -3240,9 +3240,9 @@ svm_range_best_prefetch_location(struct svm_range *prange) if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) goto out; - bo_adev = svm_range_get_adev_by_id(prange, best_loc); - if (!bo_adev) { - WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc); + bo_node = svm_range_get_node_by_id(prange, best_loc); + if (!bo_node) { + WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc); best_loc = 0; goto out; } @@ -3260,10 +3260,10 @@ svm_range_best_prefetch_location(struct svm_range *prange) continue; } - if (pdd->dev->adev == bo_adev) + if (pdd->dev->adev == bo_node->adev) continue; - if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { + if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) { best_loc = 0; break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7a33b93f9df6..a165c73b40b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -48,6 +48,7 @@ struct svm_range_bo { struct work_struct eviction_work; uint32_t evicting; struct work_struct release_work; + struct kfd_node *node; }; enum svm_work_list_ops { @@ -163,16 +164,17 @@ int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, struct svm_range *svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, struct svm_range **parent); -struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange, - uint32_t id); -int svm_range_vram_node_new(struct amdgpu_device *adev, - struct svm_range *prange, bool clear); +struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, + uint32_t gpu_id); +int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, + bool clear); void svm_range_vram_node_free(struct svm_range *prange); int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); -int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, bool write_fault); +int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, + bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, @@ -192,7 +194,7 @@ int kfd_criu_restore_svm(struct kfd_process *p, uint64_t max_priv_data_size); int kfd_criu_resume_svm(struct kfd_process *p); struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev); +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node); void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct 
mm_struct *mm); /* SVM API and HMM page migration work together, device memory type @@ -219,8 +221,9 @@ static inline void svm_range_list_fini(struct kfd_process *p) } static inline int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, - bool write_fault) + unsigned int pasid, + uint32_t client_id, uint32_t node_id, + uint64_t addr, bool write_fault) { return -EFAULT; } -- cgit v1.2.3 From d6e924ad85a0cebc9e39eb956a23386ce32cc9f9 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 9 Aug 2022 14:56:53 -0400 Subject: drm/amdkfd: Update SMI events for GFX9.4.3 On GFX 9.4.3, there can be multiple KFD nodes. As a result, SMI events for SVM, queue evict/restore should be raised for each node independently. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 16 ++++++------ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 ++--- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 38 ++++++++++++++--------------- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 14 +++++------ drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 ++--- 5 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index e7e5abc32c84..42e599912e52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,9 +423,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, migrate.dst = migrate.src + npages; scratch = (dma_addr_t *)(migrate.dst + npages); - kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_start(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc, + 0, node->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,9 +456,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->nodes[0]->id, trigger); + 0, node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -702,9 +702,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, migrate.fault_page = fault_page; scratch = (dma_addr_t *)(migrate.dst + npages); - kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_start(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc, + node->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -738,9 +738,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->nodes[0]->id, 0, trigger); + node->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 666815b227a8..a6ff57f11472 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1817,7 +1817,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid, + kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, trigger); r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, @@ -1845,7 +1845,7 @@ fail: if (n_evicted == 0) break; - kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd)) @@ -1866,7 +1866,7 @@ int kfd_process_restore_queues(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index b703da59e067..d9953c2b2661 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -250,58 +250,58 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) task_info.pid, task_info.task_name); } -void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->nodes[0]->id, write_fault ? 'W' : 'R'); + address, node->id, write_fault ? 'W' : 'R'); } -void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->nodes[0]->id, migration ? 'M' : 'U'); + pid, address, node->id, migration ? 
'M' : 'U'); } -void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); } -void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); } -void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->nodes[0]->id, trigger); + node->id, trigger); } -void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) +void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->nodes[0]->id); + node->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -324,13 +324,13 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) kfd_unref_process(p); } -void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->nodes[0]->id, trigger); + pid, address, last - address + 1, node->id, trigger); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 59cd089f80d1..fa95c2dfd587 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -29,24 +29,24 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid); void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask); void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset); -void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid, unsigned long address, bool write_fault, ktime_t ts); -void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid, unsigned long address, bool migration); -void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, 
uint32_t from, uint32_t to, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger); -void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger); -void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid, uint32_t trigger); -void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid); +void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid); void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm); -void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, +void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 639831fbb6ca..0dafbbe954ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1274,7 +1274,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, return -EINVAL; } - kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid, + kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, start, last, trigger); r = svm_range_unmap_from_gpu(pdd->dev->adev, @@ -2934,7 +2934,7 @@ retry_write_locked: svms, prange->start, prange->last, best_loc, prange->actual_loc); - kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr, + kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, timestamp); if (prange->actual_loc != best_loc) { @@ -2972,7 +2972,7 @@ retry_write_locked: pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", r, svms, prange->start, prange->last); - kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr, + kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); out_unlock_range: -- cgit v1.2.3 From f5fe7edfd6ce62cd23fbd707e7f9fe0f56a45e94 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 30 Sep 2022 09:16:21 -0400 Subject: drm/amdkfd: Update interrupt handling for GFX9.4.3 Update interrupt handling in CPX mode for GFX9.4.3 by using the VMID space instead of SDMA client id to determine if an interrupt should be processed by a KFD node. This is especially needed for handling retry faults from MMHUB. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 ++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +- 6 files changed, 19 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c390b2856cc9..22a900f298f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2434,6 +2434,9 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * amdgpu_vm_handle_fault - graceful handling of VM faults. * @adev: amdgpu device pointer * @pasid: PASID of the VM + * @vmid: VMID, only used for GFX 9.4.3. + * @node_id: Node_id received in IH cookie. Only applicable for + * GFX 9.4.3. 
* @addr: Address of the fault * @write_fault: true is write fault, false is read fault * @@ -2441,7 +2444,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. */ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault) { bool is_compute_context = false; @@ -2466,7 +2469,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index dbab31647186..8add5f5eb92a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c5752a349f3d..f2814270da40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -587,7 +587,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) @@ -610,7 +610,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index df372de6b056..fb3cf2c51da8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1073,18 +1073,14 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); -static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, - uint32_t node_id) +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id, + uint32_t vmid) { - if ((node->interrupt_bitmap & (0x1U << node_id)) || - ((node_id % 4) == 0 && - (node->interrupt_bitmap >> 16) & (0x1U << client_id))) - return true; - - return false; + return (node->interrupt_bitmap & (1 << node_id)) != 0 && + (node->compute_vmid_bitmap & (1 << vmid)) != 0; } static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, - uint32_t client_id, uint32_t node_id) { + uint32_t node_id, uint32_t vmid) { struct kfd_dev *dev = adev->kfd.dev; uint32_t i; @@ -1092,7 +1088,7 @@ static inline struct kfd_node 
*kfd_node_by_irq_ids(struct amdgpu_device *adev, return dev->nodes[0]; for (i = 0; i < dev->num_nodes; i++) - if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid)) return dev->nodes[i]; return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 0dafbbe954ca..5d6e02559d8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2799,7 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2851,10 +2851,10 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } - node = kfd_node_by_irq_ids(adev, node_id, client_id); + node = kfd_node_by_irq_ids(adev, node_id, vmid); if (!node) { - pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, - client_id); + pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, + vmid); r = -EFAULT; goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index a165c73b40b2..5116786718b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -173,7 +173,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, uint64_t addr, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, -- cgit v1.2.3 From 85b45b60722f506322393320bb6cc195378f2e4f Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Mon, 28 Nov 2022 11:26:02 -0500 Subject: amd/amdgpu: Set MTYPE_UC for access over PCIe For GFX v9_4_3, set MTYPE_UC for memory access over PCIe. v4 - add missing indentation pointed out by Felix and add his reviewed-by tag. v3 - add missing logic for the svm path. v2 - add amdgpu_xgmi_same_hive to separate access over xgmi from pcie Reviewed-by: Felix Kuehling Signed-off-by: Amber Lin Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 5d6e02559d8e..6daba0582bf3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1195,16 +1195,29 @@ svm_range_get_pte_flags(struct kfd_node *node, //e.g. NPS4. Current approch is only applicable without memory //partitions. snoop = true; - if (uncached) + if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; - /* local HBM region close to partition*/ - else if (bo_node == node) - mapping_flags |= AMDGPU_VM_MTYPE_RW; - /* local HBM region far from partition or remote XGMI GPU or - * system memory - */ - else + } else if (domain == SVM_RANGE_VRAM_DOMAIN) { + /* local HBM region close to partition with a workaround + * for Endpoint systems. + */ + if (bo_node == node) + mapping_flags |= + (node->adev->flags & AMD_IS_APU) ? 
+ AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC; + /* local HBM region far from partition or remote XGMI GPU */ + else if (svm_nodes_in_same_hive(bo_node, node)) + mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* PCIe P2P */ + else + mapping_flags |= AMDGPU_VM_MTYPE_UC; + /* system memory accessed by the APU */ + } else if (node->adev->flags & AMD_IS_APU) { mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* system memory accessed by the dGPU */ + } else { + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } break; default: mapping_flags |= coherent ? -- cgit v1.2.3 From d839a158b2480814bc438f9f46f440a7b9f63cb6 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Thu, 5 Jan 2023 10:58:07 -0500 Subject: drm/amdgpu: Correct dGPU MTYPE settings for gfx943 Revert temporary dGPU VRAM MTYPE setting and align with expected coherency protocol. Signed-off-by: Graham Sider Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 +++++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++------ 2 files changed, 7 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index aca8489635b8..b6c500be6f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1238,17 +1238,12 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, mtype = MTYPE_RW; } else { /* dGPU */ - /* - if ((mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) && - bo_adev == adev) - mapping_flags |= AMDGPU_VM_MTYPE_RW; + if (is_vram && bo_adev == adev) + mtype = MTYPE_RW; + else if (is_vram) + mtype = MTYPE_NC; else - */ - /* Temporarily comment out above lines and use MTYPE_NC - * on both VRAM and system memory access until - * MTYPE_RW can properly work on VRAM access - */ - mtype = MTYPE_NC; + mtype = MTYPE_UC; } break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 6daba0582bf3..2b79849ddd30 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1198,13 +1198,9 @@ svm_range_get_pte_flags(struct kfd_node *node, if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition with a workaround - * for Endpoint systems. - */ + /* local HBM region close to partition */ if (bo_node == node) - mapping_flags |= - (node->adev->flags & AMD_IS_APU) ? - AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_NC; + mapping_flags |= AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; -- cgit v1.2.3 From f4d8b6f5c61ab5e98258bd0072d733741c76bd8d Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 Jan 2023 11:23:50 -0500 Subject: drm/amdkfd: Enable SVM on Native mode This patch enables SVM capability on GFX9.4.3 when run in Native mode. It also sets best_prefetch and best_restore locations to CPU as there is no VRAM. 
Signed-off-by: Mukul Joshi Acked-by: Rajneesh Bhardwaj Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++++++ drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2b79849ddd30..cf354f9e4285 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2543,6 +2543,9 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } + if (node->adev->gmc.is_app_apu) + return 0; + if (prange->preferred_loc == gpuid || prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; @@ -3256,6 +3259,11 @@ svm_range_best_prefetch_location(struct svm_range *prange) goto out; } + if (bo_node->adev->gmc.is_app_apu) { + best_loc = 0; + goto out; + } + if (p->xnack_enabled) bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 5116786718b6..7515ddade3ae 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -200,7 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. */ -#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0) +#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\ + (dev)->adev->gmc.is_app_apu) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); -- cgit v1.2.3 From 610dab118ff5013d46069c828b58d576e0907b66 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:13:40 -0400 Subject: drm/amdkfd: Move pgmap to amdgpu_kfd_dev structure VRAM pgmap resource is allocated every time when switching compute partitions because kfd_dev is re-initialized by post_partition_switch, As a result, it causes memory region resource leaking and system memory usage accounting unbalanced. pgmap resource should be allocated and registered only once when loading driver and freed when unloading driver, move it from kfd_dev to amdgpu_kfd_dev. 
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 6 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index d1d643a050a1..e4e1dbba060a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include "amdgpu_sync.h" @@ -101,6 +102,9 @@ struct amdgpu_kfd_dev { uint64_t vram_used_aligned; bool init_complete; struct work_struct reset_work; + + /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ + struct dev_pagemap pgmap; }; enum kgd_engine_type { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 42e599912e52..199d32c7c289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence) unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr) { - return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT; + return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT; } static void @@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page) unsigned long addr; addr = page_to_pfn(page) << PAGE_SHIFT; - return (addr - adev->kfd.dev->pgmap.range.start); + return (addr - adev->kfd.pgmap.range.start); } static struct page * @@ -990,14 +990,14 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { int svm_migrate_init(struct amdgpu_device *adev) { - struct kfd_dev *kfddev = adev->kfd.dev; + struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; struct resource *res = NULL; unsigned long size; void *r; /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev)) + if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 02a90fd7f646..214d950f948e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -378,9 +378,6 @@ struct kfd_dev { int noretry; - /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ - struct dev_pagemap pgmap; - struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index cf354f9e4285..2b2129dd1e4a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -174,7 +174,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - - bo_adev->kfd.dev->pgmap.range.start; + bo_adev->kfd.pgmap.range.start; addr[i] |= SVM_RANGE_VRAM_DOMAIN; pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); continue; @@ -2827,7 +2827,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, bool migration = false; int r = 0; - if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { + if (!KFD_IS_SVM_API_SUPPORTED(adev)) { 
pr_debug("device does not support SVM\n"); return -EFAULT; } @@ -3112,7 +3112,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7515ddade3ae..021def496f5a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -200,8 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. */ -#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\ - (dev)->adev->gmc.is_app_apu) +#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ + (adev)->gmc.is_app_apu) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index d2a42b6b1fa8..6d6243b978e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -2021,7 +2021,7 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); -- cgit v1.2.3 From 895797d9193b38e759bc01268a8e3887e521f682 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 6 Feb 2023 14:04:42 -0500 Subject: drm/amdgpu/bu: Add use_mtype_cc_wa module param By default, set use_mtype_cc_wa to 1 to set PTE coherence flag MTYPE_CC instead of MTYPE_RW by default. This is required for the time being to mitigate a bug causing XCCs to hit stale data due to TCC marking fully dirty lines as exclusive. 
Signed-off-by: Graham Sider Reviewed-by: Joseph Greathouse Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +++++++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 7 +++++-- 4 files changed, 20 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cb9373f8c25a..cd2a29a7e26d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -212,6 +212,7 @@ extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; +extern bool amdgpu_use_mtype_cc_wa; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index da4e50aef95a..8bc37826a99f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -822,6 +822,13 @@ MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault ( module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444); #endif +/** + * DOC: use_mtype_cc_wa (bool) + */ +bool amdgpu_use_mtype_cc_wa = true; +MODULE_PARM_DESC(use_mtype_cc_wa, "Use MTYPE_CC workaround (0 = use MTYPE_RW where applicable, 1 = use MTYPE_CC where applicable (default))"); +module_param_named(use_mtype_cc_wa, amdgpu_use_mtype_cc_wa, bool, 0444); + /** * DOC: pcie_p2p (bool) * Enable PCIe P2P (requires large-BAR). Default value: true (on) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2eb67b53e497..8623b93c05ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1187,6 +1187,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; unsigned int mtype; + unsigned int mtype_default; bool snoop = false; switch (adev->ip_versions[GC_HWIP][0]) { @@ -1230,7 +1231,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, /* FIXME: Needs more work for handling multiple memory * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU * modes. + * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. + * To force use of MTYPE_RW, set use_mtype_cc_wa=0 */ + mtype_default = amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; snoop = true; if (uncached) { mtype = MTYPE_UC; @@ -1245,14 +1249,14 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * socket should be treated as remote access so MTYPE_RW * cannot be used always. 
*/ - mtype = MTYPE_RW; + mtype = mtype_default; } else if (adev->flags & AMD_IS_APU) { /* APU on carve out mode */ - mtype = MTYPE_RW; + mtype = mtype_default; } else { /* dGPU */ if (is_vram && bo_adev == adev) - mtype = MTYPE_RW; + mtype = mtype_default; else if (is_vram) mtype = MTYPE_NC; else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2b2129dd1e4a..477ef9294203 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1198,9 +1198,12 @@ svm_range_get_pte_flags(struct kfd_node *node, if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition */ + /* local HBM region close to partition + * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. + * To force use of MTYPE_RW, set use_mtype_cc_wa=0 + */ if (bo_node == node) - mapping_flags |= AMDGPU_VM_MTYPE_RW; + mapping_flags |= amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; -- cgit v1.2.3 From 1e4a00334add40f609162914af7a24bc92951008 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 21 Feb 2023 17:31:32 -0500 Subject: drm/amdgpu: Fix per-BO MTYPE selection for GFXv9.4.3 Treat system memory on NUMA systems as remote by default. Overriding with a more efficient MTYPE per page will be implemented in the next patch. No need for a special case for APP APUs. System memory is handled the same for carve-out and native mode. And VRAM doesn't exist in native mode. Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Reviewed-and-tested-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 40 ++++++++++++++--------------------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 24 ++++++++++++--------- 2 files changed, 30 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 8623b93c05ee..cf976b5b7b63 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1186,9 +1186,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - unsigned int mtype; - unsigned int mtype_default; + /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/ + unsigned int mtype_local, mtype; bool snoop = false; + bool is_local; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): @@ -1228,35 +1229,26 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, } break; case IP_VERSION(9, 4, 3): - /* FIXME: Needs more work for handling multiple memory - * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU - * modes. - * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. - * To force use of MTYPE_RW, set use_mtype_cc_wa=0 + /* Only local VRAM BOs or system memory on non-NUMA APUs + * can be assumed to be local in their entirety. Choose + * MTYPE_NC as safe fallback for all system memory BOs on + * NUMA systems. Their MTYPE can be overridden per-page in + * gmc_v9_0_override_vm_pte_flags. */ - mtype_default = amdgpu_use_mtype_cc_wa ? 
MTYPE_CC : MTYPE_RW; + mtype_local = amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; + is_local = (!is_vram && (adev->flags & AMD_IS_APU) && + num_possible_nodes() <= 1) || + (is_vram && adev == bo_adev /* TODO: memory partitions && + bo->mem_id == vm->mem_id*/); snoop = true; if (uncached) { mtype = MTYPE_UC; - } else if (adev->gmc.is_app_apu) { - /* FIXME: APU in native mode, NPS1 single socket only - * - * For suporting NUMA partitioned APU e.g. in NPS4 mode, - * this need to look at the NUMA node on which the - * system memory allocation was done. - * - * Memory access by a different partition within same - * socket should be treated as remote access so MTYPE_RW - * cannot be used always. - */ - mtype = mtype_default; } else if (adev->flags & AMD_IS_APU) { - /* APU on carve out mode */ - mtype = mtype_default; + mtype = is_local ? mtype_local : MTYPE_NC; } else { /* dGPU */ - if (is_vram && bo_adev == adev) - mtype = mtype_default; + if (is_local) + mtype = mtype_local; else if (is_vram) mtype = MTYPE_NC; else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 477ef9294203..4eec75b28917 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1151,6 +1151,7 @@ svm_range_get_pte_flags(struct kfd_node *node, bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; + unsigned int mtype_local; if (domain == SVM_RANGE_VRAM_DOMAIN) bo_node = prange->svm_bo->node; @@ -1191,19 +1192,16 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - //TODO: Need more work for handling multiple memory partitions - //e.g. NPS4. Current approch is only applicable without memory - //partitions. + mtype_local = amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : + AMDGPU_VM_MTYPE_RW; snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { - /* local HBM region close to partition - * FIXME: Temporarily using MTYPE_CC instead of MTYPE_RW where applicable. - * To force use of MTYPE_RW, set use_mtype_cc_wa=0 - */ - if (bo_node == node) - mapping_flags |= amdgpu_use_mtype_cc_wa ? 
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + /* local HBM region close to partition */ + if (bo_node->adev == node->adev /* TODO: memory partitions && + bo_node->mem_id == node->mem_id*/) + mapping_flags |= mtype_local; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) mapping_flags |= AMDGPU_VM_MTYPE_NC; @@ -1212,7 +1210,13 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= AMDGPU_VM_MTYPE_UC; /* system memory accessed by the APU */ } else if (node->adev->flags & AMD_IS_APU) { - mapping_flags |= AMDGPU_VM_MTYPE_NC; + /* On NUMA systems, locality is determined per-page + * in amdgpu_gmc_override_vm_pte_flags + */ + if (num_possible_nodes() <= 1) + mapping_flags |= mtype_local; + else + mapping_flags |= AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { mapping_flags |= AMDGPU_VM_MTYPE_UC; -- cgit v1.2.3 From 76eb9c95a409ea820b2e7c968c220e7a38f27d76 Mon Sep 17 00:00:00 2001 From: David Francis Date: Mon, 27 Feb 2023 10:33:11 -0500 Subject: drm/amdgpu/bu: add mtype_local as a module parameter Selects the MTYPE to be used for local memory, (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW) v2: squash in build fix (Alex) Reviewed-by: Graham Sider Signed-off-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 19 ++++++++++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 +-- 4 files changed, 22 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cd2a29a7e26d..c2feaf2fd070 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -212,7 +212,7 @@ extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; -extern bool amdgpu_use_mtype_cc_wa; +extern int amdgpu_mtype_local; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8bc37826a99f..706ba4af062f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -823,11 +823,11 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm #endif /** - * DOC: use_mtype_cc_wa (bool) + * DOC: mtype_local (int) */ -bool amdgpu_use_mtype_cc_wa = true; -MODULE_PARM_DESC(use_mtype_cc_wa, "Use MTYPE_CC workaround (0 = use MTYPE_RW where applicable, 1 = use MTYPE_CC where applicable (default))"); -module_param_named(use_mtype_cc_wa, amdgpu_use_mtype_cc_wa, bool, 0444); +int amdgpu_mtype_local; +MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW)"); +module_param_named(mtype_local, amdgpu_mtype_local, int, 0444); /** * DOC: pcie_p2p (bool) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c64a69f75da2..5a1414300271 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1235,7 +1235,16 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * NUMA systems. Their MTYPE can be overridden per-page in * gmc_v9_0_override_vm_pte_flags. */ - mtype_local = amdgpu_use_mtype_cc_wa ? 
MTYPE_CC : MTYPE_RW; + mtype_local = MTYPE_CC; + if (amdgpu_mtype_local == 1) { + DRM_INFO_ONCE("Using MTYPE_NC for local memory\n"); + mtype_local = MTYPE_NC; + } else if (amdgpu_mtype_local == 2) { + DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); + mtype_local = MTYPE_RW; + } else { + DRM_INFO_ONCE("Using MTYPE_CC for local memory\n"); + } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || (is_vram && adev == bo_adev /* TODO: memory partitions && @@ -1349,9 +1358,13 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", /*vm->mem_id*/0, local_node, nid); if (nid == local_node) { - unsigned int mtype_local = - amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; uint64_t old_flags = *flags; + unsigned int mtype_local = MTYPE_CC; + + if (amdgpu_mtype_local == 1) + mtype_local = MTYPE_NC; + else if (amdgpu_mtype_local == 2) + mtype_local = MTYPE_RW; *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(mtype_local); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 4eec75b28917..df0ed5677609 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1192,8 +1192,7 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - mtype_local = amdgpu_use_mtype_cc_wa ? AMDGPU_VM_MTYPE_CC : - AMDGPU_VM_MTYPE_RW; + mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_CC); snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; -- cgit v1.2.3 From b9cbd51000ad3541351ca832b00600870ac08e5c Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 6 Mar 2023 17:56:44 -0500 Subject: drm/amdgpu/bu: update mtype_local parameter settings Update mtype_local module parameter to use MTYPE_RW by default. 0: MTYPE_RW (default) 1: MTYPE_NC 2: MTYPE_CC Signed-off-by: Graham Sider Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 12 ++++++------ drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 706ba4af062f..aa466a9eb956 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -826,7 +826,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm * DOC: mtype_local (int) */ int amdgpu_mtype_local; -MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_CC (default), 1 = MTYPE_NC, 2 = MTYPE_RW)"); +MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)"); module_param_named(mtype_local, amdgpu_mtype_local, int, 0444); /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5a1414300271..32eb4f4f5492 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1235,15 +1235,15 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, * NUMA systems. Their MTYPE can be overridden per-page in * gmc_v9_0_override_vm_pte_flags. 
*/ - mtype_local = MTYPE_CC; + mtype_local = MTYPE_RW; if (amdgpu_mtype_local == 1) { DRM_INFO_ONCE("Using MTYPE_NC for local memory\n"); mtype_local = MTYPE_NC; } else if (amdgpu_mtype_local == 2) { - DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); - mtype_local = MTYPE_RW; - } else { DRM_INFO_ONCE("Using MTYPE_CC for local memory\n"); + mtype_local = MTYPE_CC; + } else { + DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || @@ -1359,12 +1359,12 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, /*vm->mem_id*/0, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; - unsigned int mtype_local = MTYPE_CC; + unsigned int mtype_local = MTYPE_RW; if (amdgpu_mtype_local == 1) mtype_local = MTYPE_NC; else if (amdgpu_mtype_local == 2) - mtype_local = MTYPE_RW; + mtype_local = MTYPE_CC; *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(mtype_local); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index df0ed5677609..e6348d4133fd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1192,7 +1192,8 @@ svm_range_get_pte_flags(struct kfd_node *node, } break; case IP_VERSION(9, 4, 3): - mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_RW : AMDGPU_VM_MTYPE_CC); + mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : + (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW); snoop = true; if (uncached) { mapping_flags |= AMDGPU_VM_MTYPE_UC; -- cgit v1.2.3 From 2046ed6c8aa951e4ae83c5022bb0a7c777386097 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:45:32 -0500 Subject: drm/amdkfd: SVM range allocation support memory partition Pass kfd node->xcp->mem_id to amdgpu bo create parameter mem_id_plus1 to allocate new svm_bo on the specified memory partition. This is only for dGPU mode as we don't migrate with APU mode. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index e6348d4133fd..62aa7fb2eaa5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -555,16 +555,20 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE; bp.type = ttm_bo_type_device; bp.resv = NULL; + if (node->xcp) + bp.mem_id_plus1 = node->xcp->mem_id + 1; - /* TODO: Allocate memory from the right memory partition. 
We can sort - * out the details later, once basic memory partitioning is working - */ r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { pr_debug("failed %d to create bo\n", r); goto create_bo_failed; } bo = &ubo->bo; + + pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", + bo->tbo.resource->start << PAGE_SHIFT, bp.size, + bp.mem_id_plus1 - 1); + r = amdgpu_bo_reserve(bo, true); if (r) { pr_debug("failed %d to reserve bo\n", r); -- cgit v1.2.3 From dc12f9eddedb8b41f4dc948e5e636e5221fb4d43 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 2 Feb 2023 11:07:53 -0500 Subject: drm/amdkfd: Update MTYPE for far memory partition Use MTYPE RW/MTYPE_CC for mapping system memory or VRAM to KFD node within the same memory partition, use MTYPE_NC for mapping on KFD node from the far memory partition of the same socket or from another socket on same XGMI hive. On NPS4 or 4P system, MTYPE will be overridden per page depending on the memory NUMA node id and vm->mem_id. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 +++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 9 +++++---- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 32eb4f4f5492..263d17a8b433 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1186,7 +1186,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/ + struct amdgpu_vm *vm = mapping->bo_va->base.vm; unsigned int mtype_local, mtype; bool snoop = false; bool is_local; @@ -1247,8 +1247,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, } is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || - (is_vram && adev == bo_adev /* TODO: memory partitions && - bo->mem_id == vm->mem_id*/); + (is_vram && adev == bo_adev && + bo->mem_id == vm->mem_id); snoop = true; if (uncached) { mtype = MTYPE_UC; @@ -1335,13 +1335,12 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, return; } - /* TODO: memory partitions. mem_id is hard-coded to 0 for now. - * FIXME: Only supported on native mode for now. For carve-out, the + /* FIXME: Only supported on native mode for now. For carve-out, the * NUMA affinity of the GPU/VM needs to come from the PCI info because * memory partitions are not associated with different NUMA nodes. 
*/ - if (adev->gmc.is_app_apu) { - local_node = adev->gmc.mem_partitions[/*vm->mem_id*/0].numa.node; + if (adev->gmc.is_app_apu && vm->mem_id >= 0) { + local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node; } else { dev_dbg(adev->dev, "Only native mode APU is supported.\n"); return; @@ -1356,7 +1355,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, } nid = pfn_to_nid(addr >> PAGE_SHIFT); dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", - /*vm->mem_id*/0, local_node, nid); + vm->mem_id, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; unsigned int mtype_local = MTYPE_RW; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 62aa7fb2eaa5..a700d9ccd054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1203,8 +1203,8 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= AMDGPU_VM_MTYPE_UC; } else if (domain == SVM_RANGE_VRAM_DOMAIN) { /* local HBM region close to partition */ - if (bo_node->adev == node->adev /* TODO: memory partitions && - bo_node->mem_id == node->mem_id*/) + if (bo_node->adev == node->adev && + (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id)) mapping_flags |= mtype_local; /* local HBM region far from partition or remote XGMI GPU */ else if (svm_nodes_in_same_hive(bo_node, node)) @@ -1358,8 +1358,9 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags); - /* TODO: we still need to determine the vm_manager.vram_base_offset based on - * the memory partition. + /* For dGPU mode, we use same vm_manager to allocate VRAM for + * different memory partition based on fpfn/lpfn, we should use + * same vm_manager.vram_base_offset regardless memory partition. */ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, -- cgit v1.2.3 From 3ebfd221c1a83e5f0edadb87d173d8fd93d1d125 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 8 Mar 2023 11:57:00 -0500 Subject: drm/amdkfd: Store xcp partition id to amdgpu bo For memory accounting per compute partition and export drm amdgpu bo and then import to KFD, we need the xcp id to account the memory usage or find the KFD node of the original amdgpu bo to create the KFD bo on the correct adev KFD node. Set xcp_id_plus1 of amdgpu_bo_param to create bo and store xcp_id to amddgpu bo. Add helper macro to get the mem_id from adev and xcp_id. 
v2: squash in fix ("drm/amdgpu: Fix BO creation failure on GFX 9.4.3 dGPU") Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 11 ++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 12 ++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 ++-- 10 files changed, 42 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 324cb566ca2f..05c54776951b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEM_ID(adev, xcp_id) \ + ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ + (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) + #define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c234dc0db799..8724a0be31b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1634,6 +1634,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t *offset, uint32_t flags, bool criu_resume) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; @@ -1641,7 +1642,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; uint64_t aligned_size; - int8_t mem_id = -1; + int8_t xcp_id = -1; u64 alloc_flags; int ret; @@ -1660,7 +1661,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } - mem_id = avm->mem_id; + xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1718,12 +1719,12 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( goto err_reserve_limit; } - pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s mem_id %d\n", + pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n", va, (*mem)->aql_queue ? size << 1 : size, - domain_string(alloc_domain), mem_id); + domain_string(alloc_domain), xcp_id); ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, - bo_type, NULL, &gobj, mem_id + 1); + bo_type, NULL, &gobj, xcp_id + 1); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 33ebee18b80d..7e8839cc6f58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -98,7 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1) + struct drm_gem_object **obj, int8_t xcp_id_plus1) { struct amdgpu_bo *bo; struct amdgpu_bo_user *ubo; @@ -116,7 +116,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.flags = flags; bp.domain = initial_domain; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = mem_id_plus1; + bp.xcp_id_plus1 = xcp_id_plus1; r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 646c4fcc8e40..f30264782ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -43,7 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1); + struct drm_gem_object **obj, int8_t xcp_id_plus1); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b2d11c4f39b0..42c02f48c3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -131,14 +131,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; + int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); - if (adev->gmc.mem_partitions && abo->mem_id >= 0) { - places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn; + if (adev->gmc.mem_partitions && mem_id >= 0) { + places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn; /* * memory partition range lpfn is inclusive start + size - 1 * TTM place lpfn is exclusive start + size */ - places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn + 1; + places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1; } else { places[c].fpfn = 0; places[c].lpfn = 0; @@ -583,8 +584,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->flags = bp->flags; - /* bo->mem_id -1 means any partition */ - bo->mem_id = bp->mem_id_plus1 - 1; + if (adev->gmc.mem_partitions) + /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */ + bo->xcp_id = bp->xcp_id_plus1 - 1; + else + /* For GPUs without spatial partitioning */ + bo->xcp_id = 0; if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index eb24a66ccee5..05496b97ef93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -56,8 +56,8 @@ struct amdgpu_bo_param { bool no_wait_gpu; struct dma_resv *resv; void (*destroy)(struct ttm_buffer_object *bo); - /* memory partition number plus 1, 0 means any partition */ - int8_t mem_id_plus1; + /* xcp partition number plus 1, 0 
means any partition */ + int8_t xcp_id_plus1; }; /* bo virtual addresses in a vm */ @@ -111,8 +111,12 @@ struct amdgpu_bo { #endif struct kgd_mem *kfd_bo; - /* memory partition number, -1 means any partition */ - int8_t mem_id; + /* + * For GPUs with spatial partitioning, xcp partition number, -1 means + * any partition. For other ASICs without spatial partition, always 0 + * for memory accounting. + */ + int8_t xcp_id; }; struct amdgpu_bo_user { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 129c593cb2bd..23101c82519a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1051,6 +1051,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; enum ttm_caching caching; @@ -1060,7 +1061,10 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->gobj = &bo->base; - gtt->pool_id = abo->mem_id; + if (adev->gmc.mem_partitions && abo->xcp_id >= 0) + gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); + else + gtt->pool_id = abo->xcp_id; if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) caching = ttm_write_combined; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 62fc7e8d326e..cc3b1b596e56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -502,6 +502,7 @@ exit: int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int level, bool immediate, struct amdgpu_bo_vm **vmbo) { + struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm); struct amdgpu_bo_param bp; struct amdgpu_bo *bo; struct dma_resv *resv; @@ -534,7 +535,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.no_wait_gpu = immediate; - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; @@ -560,7 +561,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.resv = bo->tbo.base.resv; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 
0 : fpriv->xcp_id + 1; r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 263d17a8b433..7ea80bdf8e1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1248,7 +1248,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || (is_vram && adev == bo_adev && - bo->mem_id == vm->mem_id); + KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); snoop = true; if (uncached) { mtype = MTYPE_UC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index a700d9ccd054..45959892bc0f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -556,7 +556,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; if (node->xcp) - bp.mem_id_plus1 = node->xcp->mem_id + 1; + bp.xcp_id_plus1 = node->xcp->id + 1; r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { @@ -567,7 +567,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", bo->tbo.resource->start << PAGE_SHIFT, bp.size, - bp.mem_id_plus1 - 1); + bp.xcp_id_plus1 - 1); r = amdgpu_bo_reserve(bo, true); if (r) { -- cgit v1.2.3 From 1c77527a69d5ca19cb276e2728992d922b687f35 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:22:30 -0400 Subject: drm/amdkfd: Fix memory reporting on GFX 9.4.3 This patch fixes memory reporting on the GFX 9.4.3 APU and dGPU by reporting available memory on a per partition basis. If its an APU, available and used memory calculations take into account system and TTM memory. 
v2: squash in fix ("drm/amdkfd: Fix array out of bound warning") squash in fix ("drm/amdgpu: Update memory reporting for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 81 ++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 14 ++-- 5 files changed, 84 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4bf6f5659568..948d362adabb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -35,6 +35,7 @@ #include #include "amdgpu_sync.h" #include "amdgpu_vm.h" +#include "amdgpu_xcp.h" extern uint64_t amdgpu_amdkfd_total_mem_size; @@ -98,8 +99,8 @@ struct amdgpu_amdkfd_fence { struct amdgpu_kfd_dev { struct kfd_dev *dev; - int64_t vram_used; - uint64_t vram_used_aligned; + int64_t vram_used[MAX_XCP]; + uint64_t vram_used_aligned[MAX_XCP]; bool init_complete; struct work_struct reset_work; @@ -287,7 +288,8 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, void *drm_priv); uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev); +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, @@ -327,9 +329,9 @@ void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8724a0be31b8..cc37f04651e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -157,12 +157,13 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size) * Return: returns -ENOMEM in case of error, ZERO otherwise */ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); size_t system_mem_needed, ttm_mem_needed, vram_needed; int ret = 0; + uint64_t vram_size = 0; system_mem_needed = 0; ttm_mem_needed = 0; @@ -177,6 +178,17 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, * 2M BO chunk. 
*/ vram_needed = size; + /* + * For GFX 9.4.3, get the VRAM size from XCP structs + */ + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + return -EINVAL; + + vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); + if (adev->gmc.is_app_apu) { + system_mem_needed = size; + ttm_mem_needed = size; + } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = size; } else if (!(alloc_flag & @@ -196,8 +208,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || - (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - reserved_for_pt)) { + (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > + vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } @@ -207,9 +219,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, */ WARN_ONCE(vram_needed && !adev, "adev reference can't be null when vram is used"); - if (adev) { - adev->kfd.vram_used += vram_needed; - adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); + if (adev && xcp_id >= 0) { + adev->kfd.vram_used[xcp_id] += vram_needed; + adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ? + vram_needed : + ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); } kfd_mem_limit.system_mem_used += system_mem_needed; kfd_mem_limit.ttm_mem_used += ttm_mem_needed; @@ -220,7 +234,7 @@ release: } void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -230,9 +244,19 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { WARN_ONCE(!adev, "adev reference can't be null when alloc mem flags vram is set"); + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + goto release; + if (adev) { - adev->kfd.vram_used -= size; - adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN); + adev->kfd.vram_used[xcp_id] -= size; + if (adev->gmc.is_app_apu) { + adev->kfd.vram_used_aligned[xcp_id] -= size; + kfd_mem_limit.system_mem_used -= size; + kfd_mem_limit.ttm_mem_used -= size; + } else { + adev->kfd.vram_used_aligned[xcp_id] -= + ALIGN(size, VRAM_AVAILABLITY_ALIGN); + } } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= size; @@ -242,8 +266,8 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); goto release; } - WARN_ONCE(adev && adev->kfd.vram_used < 0, - "KFD VRAM memory accounting unbalanced"); + WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, + "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, "KFD TTM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.system_mem_used < 0, @@ -259,7 +283,8 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) u32 alloc_flags = bo->kfd_bo->alloc_flags; u64 size = amdgpu_bo_size(bo); - amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags, + bo->xcp_id); kfree(bo->kfd_bo); } @@ -1609,23 +1634,42 @@ out_unlock: return ret; } -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id) { uint64_t 
reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); ssize_t available; + uint64_t vram_available, system_mem_available, ttm_mem_available; spin_lock(&kfd_mem_limit.mem_limit_lock); - available = adev->gmc.real_vram_size - - adev->kfd.vram_used_aligned + vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) + - adev->kfd.vram_used_aligned[xcp_id] - atomic64_read(&adev->vram_pin_size) - reserved_for_pt; + + if (adev->gmc.is_app_apu) { + system_mem_available = no_system_mem_limit ? + kfd_mem_limit.max_system_mem_limit : + kfd_mem_limit.max_system_mem_limit - + kfd_mem_limit.system_mem_used; + + ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - + kfd_mem_limit.ttm_mem_used; + + available = min3(system_mem_available, ttm_mem_available, + vram_available); + available = ALIGN_DOWN(available, PAGE_SIZE); + } else { + available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN); + } + spin_unlock(&kfd_mem_limit.mem_limit_lock); if (available < 0) available = 0; - return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN); + return available; } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( @@ -1713,7 +1757,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags, + xcp_id); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1781,7 +1826,7 @@ err_node_allow: /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: mutex_destroy(&(*mem)->lock); if (gobj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 68b63b970ce8..9c5912b9d8bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -24,8 +24,11 @@ #ifndef AMDGPU_XCP_H #define AMDGPU_XCP_H +#include #include +#include "amdgpu_ctx.h" + #define MAX_XCP 8 #define AMDGPU_XCP_MODE_NONE -1 @@ -34,6 +37,8 @@ #define AMDGPU_XCP_FL_NONE 0 #define AMDGPU_XCP_FL_LOCKED (1 << 0) +struct amdgpu_fpriv; + enum AMDGPU_XCP_IP_BLOCK { AMDGPU_XCP_GFXHUB, AMDGPU_XCP_GFX, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1ae867482bc7..a9efff94390b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1044,7 +1044,8 @@ static int kfd_ioctl_get_available_memory(struct file *filep, if (!pdd) return -EINVAL; - args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev); + args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, + pdd->dev->node_id); kfd_unlock_pdd(pdd); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 45959892bc0f..c1ab70faf36e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -280,7 +280,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage) if (update_mem_usage && !p->xnack_enabled) { pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } mutex_destroy(&prange->lock); mutex_destroy(&prange->migrate_mutex); @@ -313,7 +313,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, p = 
container_of(svms, struct kfd_process, svms); if (!p->xnack_enabled && update_mem_usage && amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) { + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) { pr_info("SVM mapping failed, exceeds resident system memory limit\n"); kfree(prange); return NULL; @@ -3037,10 +3037,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; if (xnack_enabled) { amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3050,10 +3050,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (prange->last - prange->start + 1) << PAGE_SHIFT; if (xnack_enabled) { amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3066,7 +3066,7 @@ out_unlock: if (r) amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); else /* Change xnack mode must be inside svms lock, to avoid race with * svm_range_deferred_list_work unreserve memory in parallel. -- cgit v1.2.3 From 25f50704343de1bea70100ad41621b5737a6a96b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Mar 2023 08:45:56 -0400 Subject: drm/amdkfd: APU mode set max svm range pages svm_migrate_init sets the max svm range pages based on the KFD nodes partition size. APU mode doesn't init pgmap because there is no migration. kgd2kfd_device_init calls svm_migrate_init after KFD nodes allocation and initialization.
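As a rough illustration of the per-partition sizing rule this change applies (a sketch only, with a hypothetical helper name, not the driver code): take roughly 1/32 of the partition's VRAM in 4 KiB pages, clamp it between 2^9 and 2^18 pages, and round the result down to a power of two.

    /* Hypothetical sketch of the per-partition sizing rule; not driver code. */
    #include <linux/log2.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    static u64 example_max_range_pages(u64 partition_bytes)
    {
    	/* bytes / 4 KiB pages / 32  ==  bytes >> 17 */
    	u64 pages = partition_bytes >> 17;

    	pages = clamp(pages, 1ULL << 9, 1ULL << 18);
    	return rounddown_pow_of_two(pages);
    }

The smallest such value across all KFD nodes is then taken as the process-wide maximum, which is what the kfd_svm.c hunk below does with min_not_zero().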
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 5 ++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 ++++++++++----- 3 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d41da964d2f5..882ff86bba08 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,9 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - svm_migrate_init(kfd->adev); - - dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -794,6 +791,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } + svm_migrate_init(kfd->adev); + if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 199d32c7c289..2512bf681112 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -1000,6 +1000,11 @@ int svm_migrate_init(struct amdgpu_device *adev) if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; + svm_range_set_max_pages(adev); + + if (adev->gmc.is_app_apu) + return 0; + pgmap = &kfddev->pgmap; memset(pgmap, 0, sizeof(*pgmap)); @@ -1042,8 +1047,6 @@ int svm_migrate_init(struct amdgpu_device *adev) amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size)); - svm_range_set_max_pages(adev); - pr_info("HMM registered %ldMB device memory\n", size >> 20); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c1ab70faf36e..206851c9e642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1938,14 +1938,19 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) { uint64_t max_pages; uint64_t pages, _pages; + uint64_t min_pages = 0; + int i; + + for (i = 0; i < adev->kfd.dev->num_nodes; i++) { + pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + pages = clamp(pages, 1ULL << 9, 1ULL << 18); + pages = rounddown_pow_of_two(pages); + min_pages = min_not_zero(min_pages, pages); + } - /* 1/32 VRAM size in pages */ - pages = adev->gmc.real_vram_size >> 17; - pages = clamp(pages, 1ULL << 9, 1ULL << 18); - pages = rounddown_pow_of_two(pages); do { max_pages = READ_ONCE(max_svm_range_pages); - _pages = min_not_zero(max_pages, pages); + _pages = min_not_zero(max_pages, min_pages); } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); } -- cgit v1.2.3 From 45b3a914d40e63d2c9e3a3e02fb2014be975b9b0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 16 May 2023 17:16:30 -0400 Subject: drm/amdgpu/gmc9: fix 64 bit division in partition code Rework logic or use do_div() to avoid problems on 32 bit. 
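For context, a minimal sketch of the 32-bit-safe division pattern referred to above (the helper name here is hypothetical; do_div() itself is the real kernel macro): do_div(n, base) divides a u64 in place by a u32 and returns the remainder, avoiding the libgcc helpers (e.g. __udivdi3) that a plain 64-bit '/' would require on 32-bit builds.

    /* Sketch: split a 64-bit total evenly without a native 64-bit division. */
    #include <linux/types.h>
    #include <asm/div64.h>

    static u64 example_split_evenly(u64 total, u32 parts)
    {
    	u64 share = total;

    	do_div(share, parts);	/* share = total / parts; remainder discarded */
    	return share;
    }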
v2: add a missing case for XCP macro v3: fix out of bounds array access v4: fix xcp handling harder Acked-by: Guchun Chen (v1) Reviewed-by: Mukul Joshi (v3) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 9 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++----- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++++-- 5 files changed, 35 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 739eb7c0d133..5de92c9ab18f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -794,3 +794,18 @@ void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) { kgd2kfd_unlock_kfd(); } + + +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) +{ + u64 tmp; + s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id); + + if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) { + tmp = adev->gmc.mem_partitions[mem_id].size; + do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition); + return tmp; + } else { + return adev->gmc.real_vram_size; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index be43d71ba7ef..94cc456761e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -333,15 +333,14 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id); + #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ - ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ - (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ - (adev)->xcp_mgr->num_xcp_per_mem_partition :\ - (adev)->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id)) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 23101c82519a..902773ce41b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -814,11 +814,14 @@ static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; uint64_t total_pages = ttm->num_pages; int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); - uint64_t page_idx, pages_per_xcc = total_pages / num_xcc; + uint64_t page_idx, pages_per_xcc; int i; uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); + pages_per_xcc = total_pages; + do_div(pages_per_xcc, num_xcc); + for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { /* MQD page: use default flags */ amdgpu_gart_bind(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7ea80bdf8e1e..f70e666cecf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1914,9 +1914,10 @@ gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, adev->gmc.num_mem_partitions = num_ranges; /* If there is 
only partition, don't use entire size */ - if (adev->gmc.num_mem_partitions == 1) - mem_ranges[0].size = - (mem_ranges[0].size * (mem_groups - 1) / mem_groups); + if (adev->gmc.num_mem_partitions == 1) { + mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1); + do_div(mem_ranges[0].size, mem_groups); + } } static void @@ -1948,8 +1949,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, break; } - size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) / - adev->gmc.num_mem_partitions; + size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { mem_ranges[i].range.fpfn = start_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 206851c9e642..b0f0d31bf3e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1939,10 +1939,14 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) uint64_t max_pages; uint64_t pages, _pages; uint64_t min_pages = 0; - int i; + int i, id; for (i = 0; i < adev->kfd.dev->num_nodes; i++) { - pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + if (adev->kfd.dev->nodes[i]->xcp) + id = adev->kfd.dev->nodes[i]->xcp->id; + else + id = -1; + pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17; pages = clamp(pages, 1ULL << 9, 1ULL << 18); pages = rounddown_pow_of_two(pages); min_pages = min_not_zero(min_pages, pages); -- cgit v1.2.3 From f2cd6b26922e68ffafd14a9128e20630296e430d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 17 May 2023 15:04:35 -0400 Subject: drm/amdkfd: fix stack size in svm_range_validate_and_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocate large local variable on heap to avoid exceeding the stack size: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c: In function ‘svm_range_validate_and_map’: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:1690:1: warning: the frame size of 2360 bytes is larger than 2048 bytes [-Wframe-larger-than=] Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 56 +++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b0f0d31bf3e6..ee16130ddc75 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1554,48 +1554,54 @@ static int svm_range_validate_and_map(struct mm_struct *mm, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb) { - struct svm_validate_context ctx; + struct svm_validate_context *ctx; unsigned long start, end, addr; struct kfd_process *p; void *owner; int32_t idx; int r = 0; - ctx.process = container_of(prange->svms, struct kfd_process, svms); - ctx.prange = prange; - ctx.intr = intr; + ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->process = container_of(prange->svms, struct kfd_process, svms); + ctx->prange = prange; + ctx->intr = intr; if (gpuidx < MAX_GPU_INSTANCE) { - bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE); - bitmap_set(ctx.bitmap, gpuidx, 1); - } else if (ctx.process->xnack_enabled) { - bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); + bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE); + bitmap_set(ctx->bitmap, gpuidx, 1); + } else if (ctx->process->xnack_enabled) { + 
bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE); /* If prefetch range to GPU, or GPU retry fault migrate range to * GPU, which has ACCESS attribute to the range, create mapping * on that GPU. */ if (prange->actual_loc) { - gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process, + gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process, prange->actual_loc); if (gpuidx < 0) { WARN_ONCE(1, "failed get device by id 0x%x\n", prange->actual_loc); - return -EINVAL; + r = -EINVAL; + goto free_ctx; } if (test_bit(gpuidx, prange->bitmap_access)) - bitmap_set(ctx.bitmap, gpuidx, 1); + bitmap_set(ctx->bitmap, gpuidx, 1); } } else { - bitmap_or(ctx.bitmap, prange->bitmap_access, + bitmap_or(ctx->bitmap, prange->bitmap_access, prange->bitmap_aip, MAX_GPU_INSTANCE); } - if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) { - if (!prange->mapped_to_gpu) - return 0; + if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { + if (!prange->mapped_to_gpu) { + r = 0; + goto free_ctx; + } - bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); } if (prange->actual_loc && !prange->ttm_res) { @@ -1603,15 +1609,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm, * svm_migrate_ram_to_vram after allocating a BO. */ WARN_ONCE(1, "VRAM BO missing during validation\n"); - return -EINVAL; + r = -EINVAL; + goto free_ctx; } - svm_range_reserve_bos(&ctx); + svm_range_reserve_bos(ctx); p = container_of(prange->svms, struct kfd_process, svms); - owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap, + owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, MAX_GPU_INSTANCE)); - for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) { + for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) { if (kfd_svm_page_owner(p, idx) != owner) { owner = NULL; break; @@ -1648,7 +1655,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } offset = (addr - start) >> PAGE_SHIFT; - r = svm_range_dma_map(prange, ctx.bitmap, offset, npages, + r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, hmm_range->hmm_pfns); if (r) { pr_debug("failed %d to dma map range\n", r); @@ -1668,7 +1675,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } r = svm_range_map_to_gpus(prange, offset, npages, readonly, - ctx.bitmap, wait, flush_tlb); + ctx->bitmap, wait, flush_tlb); unlock_out: svm_range_unlock(prange); @@ -1682,11 +1689,14 @@ unlock_out: } unreserve_out: - svm_range_unreserve_bos(&ctx); + svm_range_unreserve_bos(ctx); if (!r) prange->validate_timestamp = ktime_get_boottime(); +free_ctx: + kfree(ctx); + return r; } -- cgit v1.2.3 From c22b044070971e474dd0ff81a9830df93751f726 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Mon, 29 May 2023 16:01:37 -0500 Subject: drm/amdkfd: flag added to handle errors from svm validate and map If a return error is raised during validation and mapping of a prange, this flag is set. It is a rare occurrence, but it could happen when `amdgpu_hmm_range_get_pages_done` returns true. In such cases, the caller should retry. However, it is important to ensure that the prange is updated correctly during the retry. 
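To make the intent concrete, here is a minimal sketch of the "sticky error flag forces revalidation" idea (hypothetical structure and helper names; the real change to svm_range and svm_range_is_same_attrs is in the hunks below):

    /* Sketch only: remember a failed validate-and-map so the next pass does
     * not treat the range as already up to date.
     */
    #include <linux/types.h>

    struct example_range {
    	bool is_error_flag;
    	/* ... */
    };

    static void example_record_result(struct example_range *r, int err)
    {
    	r->is_error_flag = !!err;	/* cleared again once a retry succeeds */
    }

    static bool example_attrs_unchanged(const struct example_range *r, bool same_attrs)
    {
    	/* A prior failure forces another validate-and-map pass even if the
    	 * requested attributes did not change.
    	 */
    	return same_attrs && !r->is_error_flag;
    }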
Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index ee16130ddc75..9c88d6e90c3b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -809,7 +809,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, } } - return true; + return !prange->is_error_flag; } /** @@ -1691,6 +1691,7 @@ unlock_out: unreserve_out: svm_range_unreserve_bos(ctx); + prange->is_error_flag = !!r; if (!r) prange->validate_timestamp = ktime_get_boottime(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 762679835e31..21b14510882b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -134,6 +134,7 @@ struct svm_range { DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); bool validated_once; bool mapped_to_gpu; + bool is_error_flag; }; static inline void svm_range_lock(struct svm_range *prange) -- cgit v1.2.3 From 16cc3a221537bb3588ec2a568d7bd0e7972b25a8 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 31 May 2023 23:33:35 +0530 Subject: drm/amdgpu: Add function parameter 'event' to kdoc in svm_range_evict() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following gcc with W=1: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:1841: warning: Function parameter or member 'event' not described in 'svm_range_evict' Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9c88d6e90c3b..615eab3f78c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1826,6 +1826,7 @@ out_reschedule: * @mm: current process mm_struct * @start: starting process queue number * @last: last process queue number + * @event: mmu notifier event when range is evicted or migrated * * Stop all queues of the process to ensure GPU doesn't access the memory, then * return to let CPU evict the buffer and proceed CPU pagetable update. -- cgit v1.2.3 From ba3c87fffb79311f54464288c66421d19c2c1234 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 2 Jun 2023 12:58:05 -0400 Subject: amd/amdkfd: drop unused KFD_IOCTL_SVM_FLAG_UNCACHED flag Was leftover from GC 9.4.3 bring up and is currently unused. Drop it for now. 
Cc: Philip.Yang@amd.com Cc: rajneesh.bhardwaj@amd.com Cc: Felix.Kuehling@amd.com Reviewed-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- include/uapi/linux/kfd_ioctl.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 615eab3f78c9..5ff1a5a89d96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1154,7 +1154,7 @@ svm_range_get_pte_flags(struct kfd_node *node, uint64_t pte_flags; bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN); bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT; - bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; + bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/ unsigned int mtype_local; if (domain == SVM_RANGE_VRAM_DOMAIN) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2a9671e1ddb5..2da5c3ad71bd 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -623,8 +623,6 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disable */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 -/* Uncached access to memory */ -#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations -- cgit v1.2.3