/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
#ifndef __AMDGPU_GMC_H__
#define __AMDGPU_GMC_H__

#include <linux/types.h>

#include "amdgpu_irq.h"

/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL
/*
 * Hardware is programmed as if the hole doesn't exist with start and end
 * address values.
 *
 * This mask is used to remove the upper 16 bits of the VA and so recover
 * the linear addr value.
 */
#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL
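
/*
 * Illustrative sketch, not part of the driver API: masking with
 * AMDGPU_GMC_HOLE_MASK strips the sign-extended upper 16 bits from a
 * canonical VA so only the 48-bit linear address the hardware was
 * programmed with remains.  The helper name below is hypothetical and
 * kept out of compilation.
 */
#if 0
static inline uint64_t amdgpu_gmc_example_linear_addr(uint64_t va)
{
	/* drop the upper 16 bits, keep the 48-bit linear address */
	return va & AMDGPU_GMC_HOLE_MASK;
}
#endif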

struct firmware;

/*
 * VMHUB structures, functions & helpers
 */
struct amdgpu_vmhub {
	uint32_t	ctx0_ptb_addr_lo32;
	uint32_t	ctx0_ptb_addr_hi32;
	uint32_t	vm_inv_eng0_req;
	uint32_t	vm_inv_eng0_ack;
	uint32_t	vm_context0_cntl;
	uint32_t	vm_l2_pro_fault_status;
	uint32_t	vm_l2_pro_fault_cntl;
};

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_gmc_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* flush the vm tlb via ring */
	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
				       uint64_t pd_addr);
	/* Change the VMID -> PASID mapping */
	void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
				   unsigned pasid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint64_t flags); /* access flags */
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* get the pte flags to use, adjusted per ASIC */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
	/* get the pde for a given mc addr */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
};

struct amdgpu_xgmi {
	/* from psp */
	u64 device_id;
	u64 hive_id;
	/* fixed per family */
	u64 node_segment_size;
	/* physical node (0-3) */
	unsigned physical_node_id;
	/* number of nodes (0-4) */
	unsigned num_physical_nodes;
	/* gpu list in the same hive */
	struct list_head head;
};

struct amdgpu_gmc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			agp_size;
	u64			agp_start;
	u64			agp_end;
	u64			gart_size;
	u64			gart_start;
	u64			gart_end;
	u64			vram_start;
	u64			vram_end;
	/* FB region, the same as the local VRAM region on a single GPU; in an
	 * XGMI configuration this region covers all GPUs in the same hive and
	 * each GPU in the hive has the same view of it.
	 * GPU0's vram starts at offset (0 * segment size),
	 * GPU1 starts at offset (1 * segment size), etc.
	 */
	u64			fb_start;
	u64			fb_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64			mc_mask;
	const struct firmware	*fw;	/* MC firmware */
	uint32_t		fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
	uint32_t		srbm_soft_reset;
	bool			prt_warning;
	uint64_t		stolen_size;
	/* apertures */
	u64			shared_aperture_start;
	u64			shared_aperture_end;
	u64			private_aperture_start;
	u64			private_aperture_end;
	/* protects concurrent invalidation */
	spinlock_t		invalidate_lock;
	bool			translate_further;
	struct kfd_vm_fault_info	*vm_fault_info;
	atomic_t		vm_fault_info_updated;

	const struct amdgpu_gmc_funcs	*gmc_funcs;

	struct amdgpu_xgmi	xgmi;
};
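
/*
 * Illustrative sketch, not part of the driver API: with XGMI, each GPU's
 * local VRAM sits at physical_node_id * node_segment_size inside the
 * shared FB region described by fb_start/fb_end above.  The helper name
 * below is hypothetical and kept out of compilation.
 */
#if 0
static inline u64 amdgpu_gmc_example_xgmi_offset(struct amdgpu_gmc *gmc)
{
	/* offset of this node's VRAM inside the hive-wide FB region */
	return gmc->xgmi.physical_node_id * gmc->xgmi.node_segment_size;
}
#endif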

#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags))
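
/*
 * Illustrative sketch, not part of the driver: callers go through the
 * wrapper macros above instead of dereferencing gmc_funcs directly.
 * The helper name below is hypothetical and kept out of compilation.
 */
#if 0
static void amdgpu_gmc_example_flush(struct amdgpu_device *adev,
				     uint32_t vmid)
{
	/* only call into the backend if the ASIC registered the hook */
	if (adev->gmc.gmc_funcs->flush_gpu_tlb)
		amdgpu_gmc_flush_gpu_tlb(adev, vmid);
}
#endif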

/**
 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
 *
 * @gmc: amdgpu_gmc pointer
 *
 * Returns:
 * True if full VRAM is visible through the BAR
 */
static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
{
	WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);

	return (gmc->real_vram_size == gmc->visible_vram_size);
}
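
/*
 * Illustrative sketch, not part of the driver: a typical caller uses the
 * check above to decide whether CPU access is restricted to a visible
 * sub-range of VRAM.  The helper name below is hypothetical and kept out
 * of compilation.
 */
#if 0
static inline bool amdgpu_gmc_example_needs_visible_vram(struct amdgpu_gmc *gmc)
{
	/* only a partially visible BAR needs special placement handling */
	return !amdgpu_gmc_vram_full_visible(gmc);
}
#endif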

/**
 * amdgpu_gmc_sign_extend - sign extend the given gmc address
 *
 * @addr: address to extend
 */
static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
{
	if (addr >= AMDGPU_GMC_HOLE_START)
		addr |= AMDGPU_GMC_HOLE_END;

	return addr;
}
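
/*
 * Illustrative sketch, not part of the driver: for canonical
 * (sign-extended) addresses, amdgpu_gmc_sign_extend() and
 * AMDGPU_GMC_HOLE_MASK are inverses of each other, so masking and then
 * extending returns the original VA.  The helper name below is
 * hypothetical and kept out of compilation.
 */
#if 0
static inline bool amdgpu_gmc_example_round_trip(uint64_t va)
{
	/* true for any canonical (sign-extended) address */
	return amdgpu_gmc_sign_extend(va & AMDGPU_GMC_HOLE_MASK) == va;
}
#endif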

void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
			      struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
			     struct amdgpu_gmc *mc);

#endif