Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	450
1 files changed, 286 insertions, 164 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1bcbade479dc..039b57e4644c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,11 +51,13 @@
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
 #include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
 
 /*
  * Modules parameters.
  */
@@ -63,6 +65,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
 extern int amdgpu_audio;
@@ -85,8 +88,15 @@ extern int amdgpu_vm_debug;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
 extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -101,7 +111,7 @@ extern unsigned amdgpu_pcie_lane_cap;
 #define AMDGPU_MAX_RINGS			16
 #define AMDGPU_MAX_GFX_RINGS			1
 #define AMDGPU_MAX_COMPUTE_RINGS		8
-#define AMDGPU_MAX_VCE_RINGS			2
+#define AMDGPU_MAX_VCE_RINGS			3
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
@@ -183,6 +193,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 				 enum amd_ip_block_type block_type,
 				 enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+			 enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+		    enum amd_ip_block_type block_type);
 
 struct amdgpu_ip_block_version {
 	enum amd_ip_block_type type;
@@ -240,10 +254,9 @@ struct amdgpu_vm_pte_funcs {
 			 uint64_t pe, uint64_t src,
 			 unsigned count);
 	/* write pte one entry at a time with addr mapping */
-	void (*write_pte)(struct amdgpu_ib *ib,
-			  const dma_addr_t *pages_addr, uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags);
+	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+			  uint64_t value, unsigned count,
+			  uint32_t incr);
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -283,7 +296,8 @@ struct amdgpu_ring_funcs {
 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib);
+			struct amdgpu_ib *ib,
+			unsigned vm_id, bool ctx_switch);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -297,11 +311,20 @@ struct amdgpu_ring_funcs {
 			       uint32_t oa_base, uint32_t oa_size);
 	/* testing functions */
 	int (*test_ring)(struct amdgpu_ring *ring);
-	int (*test_ib)(struct amdgpu_ring *ring);
+	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
 	/* insert NOP packets */
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 	/* pad the indirect buffer to the necessary number of dw */
 	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+	/* note usage for clock and power gating */
+	void (*begin_use)(struct amdgpu_ring *ring);
+	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 /*
@@ -365,13 +388,6 @@ struct amdgpu_fence_driver {
 #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 
-struct amdgpu_user_fence {
-	/* write-back bo */
-	struct amdgpu_bo	*bo;
-	/* write-back address offset to bo start */
-	uint32_t		offset;
-};
-
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
@@ -389,35 +405,8 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
- * TTM.
+ * BO.
  */
-struct amdgpu_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct drm_global_reference	mem_global_ref;
-	struct ttm_bo_device            bdev;
-	bool                            mem_global_referenced;
-	bool                            initialized;
-
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry                   *vram;
-	struct dentry                   *gtt;
-#endif
-
-	/* buffer handling */
-	const struct amdgpu_buffer_funcs	*buffer_funcs;
-	struct amdgpu_ring			*buffer_funcs_ring;
-	/* Scheduler entity for buffer moves */
-	struct amd_sched_entity			entity;
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
-		       struct reservation_object *resv,
-		       struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 struct amdgpu_bo_list_entry {
 	struct amdgpu_bo		*robj;
 	struct ttm_validate_buffer	tv;
@@ -456,8 +445,6 @@ struct amdgpu_bo_va {
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 
 struct amdgpu_bo {
-	/* Protected by gem.mutex */
-	struct list_head		list;
 	/* Protected by tbo.reserved */
 	u32				prefered_domains;
 	u32				allowed_domains;
@@ -480,10 +467,12 @@ struct amdgpu_bo {
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
+	struct amdgpu_bo		*shadow;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	struct amdgpu_mn		*mn;
 	struct list_head		mn_list;
+	struct list_head		shadow_list;
 };
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
@@ -494,9 +483,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
-							struct dma_buf_attachment *attach,
-							struct sg_table *sg);
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+				 struct dma_buf_attachment *attach,
+				 struct sg_table *sg);
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags);
@@ -586,11 +576,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
 
 /*
  * GART structures, functions & helpers
@@ -609,8 +602,9 @@ struct amdgpu_gart {
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
+#endif
 	bool				ready;
 	const struct amdgpu_gart_funcs *gart_funcs;
 };
@@ -623,11 +617,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
@@ -654,6 +649,8 @@ struct amdgpu_mc {
 	uint32_t                fw_version;
 	struct amdgpu_irq_src	vm_fault;
 	uint32_t		vram_type;
+	uint32_t                srbm_soft_reset;
+	struct amdgpu_mode_mc_save save;
 };
 
 /*
@@ -698,17 +695,19 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
  */
 
 struct amdgpu_flip_work {
-	struct work_struct		flip_work;
+	struct delayed_work		flip_work;
 	struct work_struct		unpin_work;
 	struct amdgpu_device		*adev;
 	int				crtc_id;
+	u32				target_vblank;
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo		*old_rbo;
+	struct amdgpu_bo		*old_abo;
 	struct fence			*excl;
 	unsigned			shared_count;
 	struct fence			**shared;
 	struct fence_cb			cb;
+	bool				async;
 };
 
 
@@ -721,17 +720,7 @@ struct amdgpu_ib {
 	uint32_t			length_dw;
 	uint64_t			gpu_addr;
 	uint32_t			*ptr;
-	struct amdgpu_user_fence        *user;
-	struct amdgpu_vm		*vm;
-	unsigned			vm_id;
-	uint64_t			vm_pd_addr;
-	struct amdgpu_ctx		*ctx;
-	uint32_t			gds_base, gds_size;
-	uint32_t			gws_base, gws_size;
-	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
-	/* resulting sequence number */
-	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_GFX,
 	AMDGPU_RING_TYPE_COMPUTE,
 	AMDGPU_RING_TYPE_SDMA,
 	AMDGPU_RING_TYPE_UVD,
 	AMDGPU_RING_TYPE_VCE
 };
 
-extern struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job);
+		     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 			     struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
@@ -757,14 +748,11 @@ struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler 	sched;
+	struct amd_gpu_scheduler	sched;
 
-	spinlock_t              fence_lock;
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr_offs;
-	u64			next_rptr_gpu_addr;
-	volatile u32		*next_rptr_cpu_addr;
 	unsigned		wptr;
 	unsigned		wptr_old;
 	unsigned		ring_size;
@@ -783,11 +771,16 @@ struct amdgpu_ring {
 	u32			doorbell_index;
 	bool			use_doorbell;
 	unsigned		wptr_offs;
-	unsigned		next_rptr_offs;
 	unsigned		fence_offs;
-	struct amdgpu_ctx	*current_ctx;
+	uint64_t		current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	unsigned		cond_exe_offs;
+	u64			cond_exe_gpu_addr;
+	volatile u32		*cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry		*ent;
+#endif
 };
 
 /*
@@ -797,13 +790,17 @@ struct amdgpu_ring {
 /* maximum number of VMIDs */
 #define AMDGPU_NUM_VM	16
 
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
 /* number of entries in page table */
 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
 #define AMDGPU_PTE_VALID	(1 << 0)
 #define AMDGPU_PTE_SYSTEM	(1 << 1)
@@ -815,10 +812,7 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_READABLE	(1 << 5)
 #define AMDGPU_PTE_WRITEABLE	(1 << 6)
 
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
-#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
 
 /* How to programm VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER	0
@@ -828,13 +822,7 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
 	struct amdgpu_bo_list_entry	entry;
 	uint64_t			addr;
-};
-
-struct amdgpu_vm_id {
-	struct amdgpu_vm_manager_id	*mgr_id;
-	uint64_t			pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence			*flushed_updates;
+	uint64_t			shadow_addr;
 };
 
 struct amdgpu_vm {
@@ -857,24 +845,36 @@ struct amdgpu_vm {
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
 	struct fence		*page_directory_fence;
+	uint64_t		last_eviction_counter;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
 
 	/* for id and flush management per ring */
-	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
 
 	/* Scheduler entity for page table updates */
 	struct amd_sched_entity	entity;
+
+	/* client id */
+	u64                     client_id;
 };
 
-struct amdgpu_vm_manager_id {
+struct amdgpu_vm_id {
 	struct list_head	list;
-	struct fence		*active;
-	atomic_long_t		owner;
+	struct fence		*first;
+	struct amdgpu_sync	active;
+	struct fence		*last_flush;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct fence		*flushed_updates;
+
+	uint32_t                current_gpu_reset_count;
 
 	uint32_t		gds_base;
 	uint32_t		gds_size;
@@ -889,7 +889,11 @@ struct amdgpu_vm_manager {
 	struct mutex				lock;
 	unsigned				num_ids;
 	struct list_head			ids_lru;
-	struct amdgpu_vm_manager_id		ids[AMDGPU_NUM_VM];
+	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
+
+	/* Handling of VM fences */
+	u64					fence_context;
+	unsigned				seqno[AMDGPU_MAX_RINGS];
 
 	uint32_t				max_pfn;
 	/* vram base address for page table entry */
@@ -901,6 +905,8 @@ struct amdgpu_vm_manager {
 	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
 	unsigned				vm_pte_num_rings;
 	atomic_t				vm_pte_next_ring;
+	/* client id counter */
+	atomic64_t				client_counter;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@@ -910,19 +916,15 @@
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
-		      unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vm_id, uint64_t pd_addr,
-		     uint32_t gds_base, uint32_t gds_size,
-		     uint32_t gws_base, uint32_t gws_size,
-		     uint32_t oa_base, uint32_t oa_size);
+		      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -931,7 +933,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			     struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
+			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -966,6 +968,7 @@ struct amdgpu_ctx {
 	spinlock_t		ring_lock;
 	struct fence            **fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
+	bool preamble_presented;
 };
 
 struct amdgpu_ctx_mgr {
@@ -1026,6 +1029,11 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
  */
 #include "clearstate_defs.h"
 
+struct amdgpu_rlc_funcs {
+	void (*enter_safe_mode)(struct amdgpu_device *adev);
+	void (*exit_safe_mode)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_rlc {
 	/* for power gating */
 	struct amdgpu_bo	*save_restore_obj;
@@ -1044,6 +1052,24 @@ struct amdgpu_rlc {
 	uint64_t		cp_table_gpu_addr;
 	volatile uint32_t	*cp_table_ptr;
 	u32                     cp_table_size;
+
+	/* safe mode for updating CG/PG state */
+	bool in_safe_mode;
+	const struct amdgpu_rlc_funcs *funcs;
+
+	/* for firmware data */
+	u32 save_and_restore_offset;
+	u32 clear_state_descriptor_offset;
+	u32 avail_scratch_ram_locations;
+	u32 reg_restore_list_size;
+	u32 reg_list_format_start;
+	u32 reg_list_format_separate_start;
+	u32 starting_offsets_start;
+	u32 reg_list_format_size_bytes;
+	u32 reg_list_size_bytes;
+
+	u32 *register_list_format;
+	u32 *register_restore;
 };
 
 struct amdgpu_mec {
@@ -1097,6 +1123,18 @@ struct amdgpu_gca_config {
 	uint32_t macrotile_mode_array[16];
 };
 
+struct amdgpu_cu_info {
+	uint32_t number; /* total active CU number */
+	uint32_t ao_cu_mask;
+	uint32_t bitmap[4][4];
+};
+
+struct amdgpu_gfx_funcs {
+	/* get the gpu clock counter */
+	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
 struct amdgpu_gfx {
 	struct mutex			gpu_clock_mutex;
 	struct amdgpu_gca_config	config;
@@ -1129,17 +1167,24 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src		priv_reg_irq;
 	struct amdgpu_irq_src		priv_inst_irq;
 	/* gfx status */
-	uint32_t gfx_current_status;
+	uint32_t			gfx_current_status;
 	/* ce ram size*/
-	unsigned ce_ram_size;
+	unsigned			ce_ram_size;
+	struct amdgpu_cu_info		cu_info;
+	const struct amdgpu_gfx_funcs	*funcs;
+
+	/* reset mask */
+	uint32_t                        grbm_soft_reset;
+	uint32_t                        srbm_soft_reset;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+		    struct fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct fence **f);
+		       struct amdgpu_job *job, struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1148,10 +1193,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-			    uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-			unsigned size, uint32_t *data);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned ring_size, u32 nop, u32 align_mask,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1164,7 +1205,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring);
 struct amdgpu_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	uint32_t		*kdata;
+	void			*kdata;
 };
 
 struct amdgpu_cs_parser {
@@ -1187,21 +1228,39 @@ struct amdgpu_cs_parser {
 	struct fence			*fence;
 	uint64_t			bytes_moved_threshold;
 	uint64_t			bytes_moved;
+	struct amdgpu_bo_list_entry	*evictable;
 
 	/* user fence */
 	struct amdgpu_bo_list_entry	uf_entry;
 };
 
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
+
 struct amdgpu_job {
 	struct amd_sched_job    base;
 	struct amdgpu_device	*adev;
+	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
 	struct fence		*fence; /* the hw fence */
+	uint32_t		preamble_status;
 	uint32_t		num_ibs;
 	void			*owner;
-	struct amdgpu_user_fence uf;
+	uint64_t		fence_ctx; /* the fence_context this job uses */
+	bool                    vm_needs_flush;
+	unsigned		vm_id;
+	uint64_t		vm_pd_addr;
+	uint32_t		gds_base, gds_size;
+	uint32_t		gws_base, gws_size;
+	uint32_t		oa_base, oa_size;
+
+	/* user fence handling */
+	uint64_t		uf_addr;
+	uint64_t		uf_sequence;
+
 };
 #define to_amdgpu_job(sched_job) \
 		container_of((sched_job), struct amdgpu_job, base)
@@ -1501,6 +1560,12 @@ struct amdgpu_dpm_funcs {
 	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
 	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
 	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+	int (*get_sclk_od)(struct amdgpu_device *adev);
+	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*get_mclk_od)(struct amdgpu_device *adev);
+	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
 };
 
 struct amdgpu_dpm {
@@ -1582,10 +1647,12 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 /*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES	10
-#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET	256
+#define AMDGPU_DEFAULT_UVD_HANDLES	10
+#define AMDGPU_MAX_UVD_HANDLES		40
+#define AMDGPU_UVD_STACK_SIZE		(200*1024)
+#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
+#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET	256
 
 struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
@@ -1593,6 +1660,7 @@ struct amdgpu_uvd {
 	uint64_t		gpu_addr;
 	unsigned		fw_version;
 	void			*saved_bo;
+	unsigned		max_handles;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1600,7 +1668,9 @@ struct amdgpu_uvd {
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	bool			address_64_bit;
+	bool			use_ctx_buf;
 	struct amd_sched_entity entity;
+	uint32_t                srbm_soft_reset;
 };
 
 /*
@@ -1621,11 +1691,14 @@ struct amdgpu_vce {
 	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
 	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
 	struct delayed_work	idle_work;
+	struct mutex		idle_mutex;
 	const struct firmware	*fw;	/* VCE firmware */
 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
 	struct amdgpu_irq_src	irq;
 	unsigned		harvest_config;
 	struct amd_sched_entity	entity;
+	uint32_t                srbm_soft_reset;
+	unsigned		num_rings;
 };
 
 /*
@@ -1643,9 +1716,14 @@ struct amdgpu_sdma_instance {
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+	//SI DMA has a difference trap irq number for the second engine
+	struct amdgpu_irq_src	trap_irq_1;
+#endif
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
-	int 			num_instances;
+	int			num_instances;
+	uint32_t                    srbm_soft_reset;
 };
 
 /*
@@ -1691,12 +1769,12 @@ static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
  * Debugfs
  */
 struct amdgpu_debugfs {
-	struct drm_info_list	*files;
+	const struct drm_info_list	*files;
 	unsigned		num_files;
 };
 
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
-			     struct drm_info_list *files,
+			     const struct drm_info_list *files,
 			     unsigned nfiles);
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
@@ -1705,6 +1783,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
 /*
  * amdgpu smumgr functions
  */
@@ -1738,13 +1818,6 @@ struct amdgpu_allowed_register_entry {
 	bool grbm_indexed;
 };
 
-struct amdgpu_cu_info {
-	uint32_t number; /* total active CU number */
-	uint32_t ao_cu_mask;
-	uint32_t bitmap[4][4];
-};
-
-
 /*
  * ASIC specific functions.
 */
@@ -1752,20 +1825,19 @@ struct amdgpu_asic_funcs {
 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
 	bool (*read_bios_from_rom)(struct amdgpu_device *adev, u8 *bios,
 				   u32 length_bytes);
+	void (*detect_hw_virtualization) (struct amdgpu_device *adev);
 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
 	int (*reset)(struct amdgpu_device *adev);
-	/* wait for mc_idle */
-	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
 	/* get the reference clock */
 	u32 (*get_xclk)(struct amdgpu_device *adev);
-	/* get the gpu clock counter */
-	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
-	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+	/* static power management */
+	int (*get_pcie_lanes)(struct amdgpu_device *adev);
+	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 };
 
 /*
@@ -1855,21 +1927,8 @@ struct amdgpu_atcs {
  * CGS
  */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
-
-
-/*
- * CGS
- */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
-
-
-/* GPU virtualization */
-struct amdgpu_virtualization {
-	bool supports_sr_iov;
-};
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 /*
  * Core structure, functions and helpers.
 */
@@ -1884,6 +1943,8 @@ struct amdgpu_ip_block_status {
 	bool valid;
 	bool sw;
 	bool hw;
+	bool late_initialized;
+	bool hang;
 };
 
 struct amdgpu_device {
@@ -1904,16 +1965,15 @@ struct amdgpu_device {
 	int				usec_timeout;
 	const struct amdgpu_asic_funcs	*asic_funcs;
 	bool				shutdown;
-	bool				suspend;
 	bool				need_dma32;
 	bool				accel_working;
-	struct work_struct 		reset_work;
+	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-	unsigned 			debugfs_count;
+	unsigned			debugfs_count;
 #if defined(CONFIG_DEBUG_FS)
-	struct dentry			*debugfs_regs;
+	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
 	struct amdgpu_atif		atif;
 	struct amdgpu_atcs		atcs;
@@ -1926,7 +1986,6 @@ struct amdgpu_device {
 	/* BIOS */
 	uint8_t				*bios;
 	bool				is_atom_bios;
-	uint16_t			bios_header_start;
 	struct amdgpu_bo		*stollen_vga_memory;
 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -1944,6 +2003,8 @@ struct amdgpu_device {
 	spinlock_t pcie_idx_lock;
 	amdgpu_rreg_t			pcie_rreg;
 	amdgpu_wreg_t			pcie_wreg;
+	amdgpu_rreg_t			pciep_rreg;
+	amdgpu_wreg_t			pciep_wreg;
 	/* protects concurrent UVD register access */
 	spinlock_t uvd_ctx_idx_lock;
 	amdgpu_rreg_t			uvd_ctx_rreg;
@@ -1952,6 +2013,10 @@ struct amdgpu_device {
 	spinlock_t didt_idx_lock;
 	amdgpu_rreg_t			didt_rreg;
 	amdgpu_wreg_t			didt_wreg;
+	/* protects concurrent gc_cac register access */
+	spinlock_t gc_cac_idx_lock;
+	amdgpu_rreg_t			gc_cac_rreg;
+	amdgpu_wreg_t			gc_cac_wreg;
 	/* protects concurrent ENDPOINT (audio) register access */
 	spinlock_t audio_endpt_idx_lock;
 	amdgpu_block_rreg_t		audio_endpt_rreg;
@@ -1977,9 +2042,19 @@ struct amdgpu_device {
 	atomic64_t			vram_vis_usage;
 	atomic64_t			gtt_usage;
 	atomic64_t			num_bytes_moved;
+	atomic64_t			num_evictions;
 	atomic_t			gpu_reset_counter;
 
+	/* data for buffer migration throttling */
+	struct {
+		spinlock_t		lock;
+		s64			last_update_us;
+		s64			accum_us; /* accumulated microseconds */
+		u32			log2_max_MBps;
+	} mm_stats;
+
 	/* display */
+	bool				enable_virtual_display;
 	struct amdgpu_mode_info		mode_info;
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
@@ -1987,7 +2062,7 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	unsigned			fence_context;
+	u64				fence_context;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
@@ -2042,6 +2117,14 @@ struct amdgpu_device {
 	struct kfd_dev          *kfd;
 
 	struct amdgpu_virtualization virtualization;
+
+	/* link all shadow bo */
+	struct list_head		shadow_list;
+	struct mutex			shadow_list_lock;
+	/* link all gtt */
+	spinlock_t			gtt_list_lock;
+	struct list_head		gtt_list;
+
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2074,12 +2157,16 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
 #define WREG32_P(reg, val, mask)				\
@@ -2115,6 +2202,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET_FIELD(value, reg, field)				\
 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD(reg, field, val)	\
+	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
 */
@@ -2155,34 +2245,41 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 */
 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
-#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2196,7 +2293,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
 #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
 #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
-#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
+#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
@@ -2211,6 +2308,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+	((adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+		-EINVAL)
 
 #define amdgpu_dpm_get_temperature(adev) \
 	((adev)->pp_enabled ?						\
@@ -2263,11 +2367,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 	 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
 	 (adev)->pm.funcs->powergate_vce((adev), (g)))
 
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-	((adev)->pp_enabled ?						\
-		(adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-		(adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
 	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
 
@@ -2289,6 +2388,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_force_clock_level(adev, type, level) \
 	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
 
+#define amdgpu_dpm_get_sclk_od(adev) \
+	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
 	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2296,6 +2407,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 
 /* Common functions */
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2304,7 +2416,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -2321,6 +2433,10 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);
@@ -2330,16 +2446,22 @@ bool amdgpu_device_is_px(struct drm_device *dev);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 #endif
 
 /*
 * KMS
 */
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
-extern int amdgpu_max_kms_ioctl;
+extern const int amdgpu_max_kms_ioctl;
 
 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int amdgpu_driver_unload_kms(struct drm_device *dev);
@@ -2349,8 +2471,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
				  struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2396,7 +2518,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
-
 #endif
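
The WREG32_FIELD macro added by this patch wraps the usual read-modify-write sequence for updating one field of a hardware register: read the register, mask out the old field with REG_FIELD_MASK, shift the new value into place with REG_FIELD_SHIFT, and write the result back. A minimal standalone sketch of that pattern follows; it is only an illustration, and EXAMPLE_FIELD_SHIFT/EXAMPLE_FIELD_MASK are hypothetical stand-ins for the mask and shift values the driver generates from its register headers:

	#include <stdint.h>

	/* hypothetical field occupying bits 7..11 of a 32-bit register */
	#define EXAMPLE_FIELD_SHIFT	7
	#define EXAMPLE_FIELD_MASK	(0x1f << EXAMPLE_FIELD_SHIFT)

	static uint32_t example_write_field(uint32_t reg, uint32_t val)
	{
		/* clear the old field, then OR in the new value at its offset */
		return (reg & ~EXAMPLE_FIELD_MASK) |
		       ((val << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK);
	}

The same clear-then-set shape also underlies the new AMDGPU_PTE_FRAG(x) definition above, which places a 5-bit fragment size into bits 7..11 of a page table entry.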