Diffstat (limited to 'drivers')
185 files changed, 19744 insertions, 5396 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 4431129..0f7d28a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -845,6 +845,8 @@ void intel_gtt_insert_page(dma_addr_t addr, unsigned int pg,
                           unsigned int flags)
 {
     intel_private.driver->write_entry(addr, pg, flags);
+    if (intel_private.driver->chipset_flush)
+        intel_private.driver->chipset_flush();
 }
 EXPORT_SYMBOL(intel_gtt_insert_page);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c02be6a..536b5d4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -147,9 +147,6 @@ config DRM_AMDGPU
       If M is selected, the module will be called amdgpu.
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
-
-source "drivers/gpu/drm/amd/acp/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 12a966e..439d89b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -48,7 +48,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA) += mga/
 obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_VC4) += vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c04..f3cb69d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -25,3 +25,5 @@ config DRM_AMDGPU_GART_DEBUGFS
       Selecting this option creates a debugfs file to inspect the mapped
       pages. Uses more memory for housekeeping, enable only for debugging.
 
+source "drivers/gpu/drm/amd/powerplay/Kconfig"
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdce..21dd7c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -58,7 +58,8 @@ amdgpu-y += \
 # add DCE block
 amdgpu-y += \
     dce_v10_0.o \
-    dce_v11_0.o
+    dce_v11_0.o \
+    dce_virtual.o
 
 # add GFX block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 0619269..b8d6667 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
@@ -119,6 +120,7 @@
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
@@ -147,6 +149,7 @@
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
 /****************************************************/
@@ -408,6 +411,10 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL             ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
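For reference, the new virtual-display object IDs above plug into the existing shift/mask packing scheme in ObjectID.h. A minimal sketch of the packing, assuming the shift values that this header conventionally uses (OBJECT_TYPE_SHIFT 0x0C, ENUM_ID_SHIFT 0x08, OBJECT_ID_SHIFT 0x00 — these are assumptions, not shown in the hunks above):

    #include <stdint.h>
    #include <stdio.h>

    /* Object-ID values from the ObjectID.h hunks above; the shift
     * constants are assumed from the header's existing scheme. */
    #define OBJECT_TYPE_SHIFT         0x0C
    #define ENUM_ID_SHIFT             0x08
    #define OBJECT_ID_SHIFT           0x00
    #define GRAPH_OBJECT_TYPE_ENCODER 0x02
    #define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
    #define ENCODER_OBJECT_ID_VIRTUAL 0x28

    int main(void)
    {
        uint32_t id = GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |
                      GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |
                      ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT;

        printf("ENCODER_VIRTUAL_ENUM_VIRTUAL = 0x%04x\n", id); /* 0x2828 */
        return 0;
    }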
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1..3cc2629 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,6 +51,7 @@
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
@@ -91,6 +92,8 @@ extern unsigned amdgpu_pcie_lane_cap;
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS          3000
 #define AMDGPU_MAX_USEC_TIMEOUT                 100000  /* 100 ms */
@@ -248,10 +251,9 @@ struct amdgpu_vm_pte_funcs {
                      uint64_t pe, uint64_t src,
                      unsigned count);
     /* write pte one entry at a time with addr mapping */
-    void (*write_pte)(struct amdgpu_ib *ib,
-              const dma_addr_t *pages_addr, uint64_t pe,
-              uint64_t addr, unsigned count,
-              uint32_t incr, uint32_t flags);
+    void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+              uint64_t value, unsigned count,
+              uint32_t incr);
     /* for linear pte/pde updates without addr mapping */
     void (*set_pte_pde)(struct amdgpu_ib *ib,
                 uint64_t pe,
@@ -396,46 +398,9 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
- * TTM.
+ * BO.
  */
-
-#define AMDGPU_TTM_LRU_SIZE 20
-
-struct amdgpu_mman_lru {
-    struct list_head *lru[TTM_NUM_MEM_TYPES];
-    struct list_head *swap_lru;
-};
-
-struct amdgpu_mman {
-    struct ttm_bo_global_ref bo_global_ref;
-    struct drm_global_reference mem_global_ref;
-    struct ttm_bo_device bdev;
-    bool mem_global_referenced;
-    bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
-    struct dentry *vram;
-    struct dentry *gtt;
-#endif
-
-    /* buffer handling */
-    const struct amdgpu_buffer_funcs *buffer_funcs;
-    struct amdgpu_ring *buffer_funcs_ring;
-    /* Scheduler entity for buffer moves */
-    struct amd_sched_entity entity;
-
-    /* custom LRU management */
-    struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-               uint64_t src_offset,
-               uint64_t dst_offset,
-               uint32_t byte_count,
-               struct reservation_object *resv,
-               struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 struct amdgpu_bo_list_entry {
     struct amdgpu_bo *robj;
     struct ttm_validate_buffer tv;
@@ -498,10 +463,12 @@ struct amdgpu_bo {
     struct amdgpu_device *adev;
     struct drm_gem_object gem_base;
     struct amdgpu_bo *parent;
+    struct amdgpu_bo *shadow;
 
     struct ttm_bo_kmap_obj dma_buf_vmap;
     struct amdgpu_mn *mn;
     struct list_head mn_list;
+    struct list_head shadow_list;
 };
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
 
@@ -677,6 +644,8 @@ struct amdgpu_mc {
     uint32_t fw_version;
     struct amdgpu_irq_src vm_fault;
     uint32_t vram_type;
+    uint32_t srbm_soft_reset;
+    struct amdgpu_mode_mc_save save;
 };
 
 /*
@@ -721,10 +690,11 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
  */
 
 struct amdgpu_flip_work {
-    struct work_struct flip_work;
+    struct delayed_work flip_work;
     struct work_struct unpin_work;
     struct amdgpu_device *adev;
     int crtc_id;
+    u32 target_vblank;
     uint64_t base;
     struct drm_pending_vblank_event *event;
     struct amdgpu_bo *old_rbo;
@@ -815,13 +785,17 @@ struct amdgpu_ring {
 /* maximum number of VMIDs */
 #define AMDGPU_NUM_VM 16
 
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
 /* number of entries in page table */
 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
 
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
 #define AMDGPU_PTE_VALID     (1 << 0)
 #define AMDGPU_PTE_SYSTEM    (1 << 1)
@@ -833,10 +807,7 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_READABLE  (1 << 5)
 #define AMDGPU_PTE_WRITEABLE (1 << 6)
 
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB  (0 << 7)
-#define AMDGPU_PTE_FRAG_64KB (4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x)   ((x & 0x1f) << 7)
 
 /* How to programm VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER  0
@@ -846,6 +817,7 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
     struct amdgpu_bo_list_entry entry;
     uint64_t addr;
+    uint64_t shadow_addr;
 };
 
 struct amdgpu_vm {
@@ -948,7 +920,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
               struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                     struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -957,7 +928,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
             struct amdgpu_bo_va *bo_va,
-            struct ttm_mem_reg *mem);
+            bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                  struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -1195,6 +1166,10 @@ struct amdgpu_gfx {
     unsigned ce_ram_size;
     struct amdgpu_cu_info cu_info;
     const struct amdgpu_gfx_funcs *funcs;
+
+    /* reset mask */
+    uint32_t grbm_soft_reset;
+    uint32_t srbm_soft_reset;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1683,6 +1658,7 @@ struct amdgpu_uvd {
     bool address_64_bit;
     bool use_ctx_buf;
     struct amd_sched_entity entity;
+    uint32_t srbm_soft_reset;
 };
 
 /*
@@ -1709,6 +1685,7 @@ struct amdgpu_vce {
     struct amdgpu_irq_src irq;
     unsigned harvest_config;
     struct amd_sched_entity entity;
+    uint32_t srbm_soft_reset;
 };
 
 /*
@@ -1729,6 +1706,7 @@ struct amdgpu_sdma {
     struct amdgpu_irq_src trap_irq;
     struct amdgpu_irq_src illegal_inst_irq;
     int num_instances;
+    uint32_t srbm_soft_reset;
 };
 
 /*
@@ -1956,6 +1934,7 @@ struct amdgpu_ip_block_status {
     bool valid;
     bool sw;
     bool hw;
+    bool hang;
 };
 
 struct amdgpu_device {
@@ -2055,6 +2034,7 @@ struct amdgpu_device {
     atomic_t gpu_reset_counter;
 
     /* display */
+    bool enable_virtual_display;
     struct amdgpu_mode_info mode_info;
     struct work_struct hotplug_work;
     struct amdgpu_irq_src crtc_irq;
@@ -2117,6 +2097,10 @@ struct amdgpu_device {
     struct kfd_dev *kfd;
 
     struct amdgpu_virtualization virtualization;
+
+    /* link all shadow bo */
+    struct list_head shadow_list;
+    struct mutex shadow_list_lock;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2192,6 +2176,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET_FIELD(value, reg, field) \
     (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD(reg, field, val) \
+    WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
  */
@@ -2242,7 +2229,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2387,6 +2374,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 
 /* Common functions */
 int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2412,6 +2400,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                       const u32 *registers,
                       const u32 array_size);
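Two of the helpers added to amdgpu.h above are easy to misread in flattened form; a short illustration in comment form, using a hypothetical register GRBM_STATUS with field GUI_ACTIVE (names chosen for illustration only):

    /* WREG32_FIELD() is a read-modify-write helper; e.g.
     *   WREG32_FIELD(GRBM_STATUS, GUI_ACTIVE, 1);
     * expands, conceptually, to:
     *   WREG32(mmGRBM_STATUS,
     *          (RREG32(mmGRBM_STATUS) & ~REG_FIELD_MASK(GRBM_STATUS, GUI_ACTIVE)) |
     *          (1 << REG_FIELD_SHIFT(GRBM_STATUS, GUI_ACTIVE)));
     *
     * AMDGPU_PTE_FRAG(x) generalizes the removed fixed fragment defines;
     * x is the log2 of the number of contiguous 4KB pages per fragment:
     *   AMDGPU_PTE_FRAG(0) == old AMDGPU_PTE_FRAG_4KB  (0 << 7)
     *   AMDGPU_PTE_FRAG(4) == old AMDGPU_PTE_FRAG_64KB (4 << 7), i.e. 2^4 pages * 4KB
     */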
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9831753..1b62116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@ static const int object_connector_convert[] = {
     DRM_MODE_CONNECTOR_Unknown
 };
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+    struct amdgpu_mode_info *mode_info = &adev->mode_info;
+    struct atom_context *ctx = mode_info->atom_context;
+    int index = GetIndexIntoMasterTable(DATA, Object_Header);
+    u16 size, data_offset;
+    u8 frev, crev;
+    ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+    ATOM_OBJECT_HEADER *obj_header;
+
+    if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+        return false;
+
+    if (crev < 2)
+        return false;
+
+    obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+    path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+        (ctx->bios + data_offset +
+         le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+    if (path_obj->ucNumOfDispPath)
+        return true;
+    else
+        return false;
+}
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
 {
     struct amdgpu_mode_info *mode_info = &adev->mode_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e696..15dd43e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
                               uint8_t id);
 void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
 
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
 
 int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a4..3453052 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
     start_jiffies = jiffies;
     for (i = 0; i < n; i++) {
         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-        r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+        r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+                       false);
         if (r)
             goto exit_do_move;
         r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ff0b55a..319a5e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1504,6 +1504,86 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
     .force = amdgpu_connector_dvi_force,
 };
 
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+    int enc_id = connector->encoder_ids[0];
+    struct drm_encoder *encoder;
+    int i;
+    for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+        if (connector->encoder_ids[i] == 0)
+            break;
+
+        encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+        if (!encoder)
+            continue;
+
+        if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+            return encoder;
+    }
+
+    /* pick the first one */
+    if (enc_id)
+        return drm_encoder_find(connector->dev, enc_id);
+    return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+    struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+    if (encoder) {
+        amdgpu_connector_add_common_modes(encoder, connector);
+    }
+
+    return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+                           struct drm_display_mode *mode)
+{
+    return MODE_OK;
+}
+
+int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+    return 0;
+}
+
+static enum drm_connector_status
+
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+    return connector_status_connected;
+}
+
+int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+                      struct drm_property *property,
+                      uint64_t val)
+{
+    return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+    return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+    .get_modes = amdgpu_connector_virtual_get_modes,
+    .mode_valid = amdgpu_connector_virtual_mode_valid,
+    .best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+    .dpms = amdgpu_connector_virtual_dpms,
+    .detect = amdgpu_connector_virtual_detect,
+    .fill_modes = drm_helper_probe_single_connector_modes,
+    .set_property = amdgpu_connector_virtual_set_property,
+    .destroy = amdgpu_connector_destroy,
+    .force = amdgpu_connector_virtual_force,
+};
+
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
              uint32_t connector_id,
@@ -1888,6 +1968,17 @@ amdgpu_connector_add(struct amdgpu_device *adev,
         connector->interlace_allowed = false;
         connector->doublescan_allowed = false;
         break;
+    case DRM_MODE_CONNECTOR_VIRTUAL:
+        amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+        if (!amdgpu_dig_connector)
+            goto failed;
+        amdgpu_connector->con_priv = amdgpu_dig_connector;
+        drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+        drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+        subpixel_order = SubPixelHorizontalRGB;
+        connector->interlace_allowed = false;
+        connector->doublescan_allowed = false;
+        break;
     }
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0307ff5..d80e5d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -287,18 +287,56 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
     return max(bytes_moved_threshold, 1024*1024ull);
 }
 
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+                 struct amdgpu_bo *bo)
+{
+    u64 initial_bytes_moved;
+    uint32_t domain;
+    int r;
+
+    if (bo->pin_count)
+        return 0;
+
+    /* Avoid moving this one if we have moved too many buffers
+     * for this IB already.
+     *
+     * Note that this allows moving at least one buffer of
+     * any size, because it doesn't take the current "bo"
+     * into account. We don't want to disallow buffer moves
+     * completely.
+     */
+    if (p->bytes_moved <= p->bytes_moved_threshold)
+        domain = bo->prefered_domains;
+    else
+        domain = bo->allowed_domains;
+
+retry:
+    amdgpu_ttm_placement_from_domain(bo, domain);
+    initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+    r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+    p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+        initial_bytes_moved;
+
+    if (unlikely(r)) {
+        if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
+            domain = bo->allowed_domains;
+            goto retry;
+        }
+    }
+
+    return r;
+}
+
 int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                 struct list_head *validated)
 {
     struct amdgpu_bo_list_entry *lobj;
-    u64 initial_bytes_moved;
     int r;
 
     list_for_each_entry(lobj, validated, tv.head) {
         struct amdgpu_bo *bo = lobj->robj;
         bool binding_userptr = false;
         struct mm_struct *usermm;
-        uint32_t domain;
 
         usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
         if (usermm && usermm != current->mm)
@@ -313,35 +351,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
             binding_userptr = true;
         }
 
-        if (bo->pin_count)
-            continue;
-
-        /* Avoid moving this one if we have moved too many buffers
-         * for this IB already.
-         *
-         * Note that this allows moving at least one buffer of
-         * any size, because it doesn't take the current "bo"
-         * into account. We don't want to disallow buffer moves
-         * completely.
-         */
-        if (p->bytes_moved <= p->bytes_moved_threshold)
-            domain = bo->prefered_domains;
-        else
-            domain = bo->allowed_domains;
-
-    retry:
-        amdgpu_ttm_placement_from_domain(bo, domain);
-        initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-        p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
-            initial_bytes_moved;
-
-        if (unlikely(r)) {
-            if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-                domain = bo->allowed_domains;
-                goto retry;
-            }
+        r = amdgpu_cs_bo_validate(p, bo);
+        if (r)
             return r;
+
+        if (bo->shadow) {
+            r = amdgpu_cs_bo_validate(p, bo);
+            if (r)
+                return r;
         }
 
         if (binding_userptr) {
@@ -386,8 +402,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                        &duplicates);
-        if (unlikely(r != 0))
+        if (unlikely(r != 0)) {
+            DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
             goto error_free_pages;
+        }
 
         /* Without a BO list we don't have userptr BOs */
         if (!p->bo_list)
@@ -427,9 +445,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
         /* Unreserve everything again. */
         ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 
-        /* We tried to often, just abort */
+        /* We tried too many times, just abort */
         if (!--tries) {
             r = -EDEADLK;
+            DRM_ERROR("deadlock in %s\n", __func__);
             goto error_free_pages;
         }
 
@@ -441,11 +460,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                      sizeof(struct page*));
             if (!e->user_pages) {
                 r = -ENOMEM;
+                DRM_ERROR("calloc failure in %s\n", __func__);
                 goto error_free_pages;
             }
 
             r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
             if (r) {
+                DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
                 drm_free_large(e->user_pages);
                 e->user_pages = NULL;
                 goto error_free_pages;
@@ -462,12 +483,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
     p->bytes_moved = 0;
 
     r = amdgpu_cs_list_validate(p, &duplicates);
-    if (r)
+    if (r) {
+        DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
         goto error_validate;
+    }
 
     r = amdgpu_cs_list_validate(p, &p->validated);
-    if (r)
+    if (r) {
+        DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
         goto error_validate;
+    }
 
     fpriv->vm.last_eviction_counter =
         atomic64_read(&p->adev->num_evictions);
@@ -617,7 +642,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
         if (bo_va == NULL)
             continue;
 
-        r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+        r = amdgpu_vm_bo_update(adev, bo_va, false);
         if (r)
             return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab245..c38dc47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -46,6 +46,7 @@
 #endif
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -1181,10 +1182,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
     return 1;
 }
 
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+    adev->enable_virtual_display = false;
+
+    if (amdgpu_virtual_display) {
+        struct drm_device *ddev = adev->ddev;
+        const char *pci_address_name = pci_name(ddev->pdev);
+        char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+        pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+        pciaddstr_tmp = pciaddstr;
+        while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+            if (!strcmp(pci_address_name, pciaddname)) {
+                adev->enable_virtual_display = true;
+                break;
+            }
+        }
+
+        DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+             amdgpu_virtual_display, pci_address_name,
+             adev->enable_virtual_display);
+
+        kfree(pciaddstr);
+    }
+}
+
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
     int i, r;
 
+    amdgpu_whether_enable_virtual_display(adev);
+
     switch (adev->asic_type) {
     case CHIP_TOPAZ:
     case CHIP_TONGA:
@@ -1521,6 +1550,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     spin_lock_init(&adev->gc_cac_idx_lock);
     spin_lock_init(&adev->audio_endpt_idx_lock);
 
+    INIT_LIST_HEAD(&adev->shadow_list);
+    mutex_init(&adev->shadow_list_lock);
+
     adev->rmmio_base = pci_resource_start(adev->pdev, 5);
     adev->rmmio_size = pci_resource_len(adev->pdev, 5);
     adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
@@ -1937,6 +1969,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
     return 0;
 }
 
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+    int i;
+    bool asic_hang = false;
+
+    for (i = 0; i < adev->num_ip_blocks; i++) {
+        if (!adev->ip_block_status[i].valid)
+            continue;
+        if (adev->ip_blocks[i].funcs->check_soft_reset)
+            adev->ip_blocks[i].funcs->check_soft_reset(adev);
+        if (adev->ip_block_status[i].hang) {
+            DRM_INFO("IP block:%d is hang!\n", i);
+            asic_hang = true;
+        }
+    }
+    return asic_hang;
+}
+
+int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+    int i, r = 0;
+
+    for (i = 0; i < adev->num_ip_blocks; i++) {
+        if (!adev->ip_block_status[i].valid)
+            continue;
+        if (adev->ip_block_status[i].hang &&
+            adev->ip_blocks[i].funcs->pre_soft_reset) {
+            r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+            if (r)
+                return r;
+        }
+    }
+
+    return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+    if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
+        adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
+        adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
+        adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
+        DRM_INFO("Some block need full reset!\n");
+        return true;
+    }
+    return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+    int i, r = 0;
+
+    for (i = 0; i < adev->num_ip_blocks; i++) {
+        if (!adev->ip_block_status[i].valid)
+            continue;
+        if (adev->ip_block_status[i].hang &&
+            adev->ip_blocks[i].funcs->soft_reset) {
+            r = adev->ip_blocks[i].funcs->soft_reset(adev);
+            if (r)
+                return r;
+        }
+    }
+
+    return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+    int i, r = 0;
+
+    for (i = 0; i < adev->num_ip_blocks; i++) {
+        if (!adev->ip_block_status[i].valid)
+            continue;
+        if (adev->ip_block_status[i].hang &&
+            adev->ip_blocks[i].funcs->post_soft_reset)
+            r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+        if (r)
+            return r;
+    }
+
+    return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+    if (adev->flags & AMD_IS_APU)
+        return false;
+
+    return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+                       struct amdgpu_ring *ring,
+                       struct amdgpu_bo *bo,
+                       struct fence **fence)
+{
+    uint32_t domain;
+    int r;
+
+    if (!bo->shadow)
+        return 0;
+
+    r = amdgpu_bo_reserve(bo, false);
+    if (r)
+        return r;
+    domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+    /* if bo has been evicted, then no need to recover */
+    if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+        r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+                          NULL, fence, true);
+        if (r) {
+            DRM_ERROR("recover page table failed!\n");
+            goto err;
+        }
+    }
+err:
+    amdgpu_bo_unreserve(bo);
+    return r;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -1949,6 +2101,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
     int i, r;
     int resched;
+    bool need_full_reset;
+
+    if (!amdgpu_check_soft_reset(adev)) {
+        DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+        return 0;
+    }
 
     atomic_inc(&adev->gpu_reset_counter);
@@ -1967,40 +2125,88 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
     /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
     amdgpu_fence_driver_force_completion(adev);
 
-    /* save scratch */
-    amdgpu_atombios_scratch_regs_save(adev);
-    r = amdgpu_suspend(adev);
+    need_full_reset = amdgpu_need_full_reset(adev);
 
-retry:
-    /* Disable fb access */
-    if (adev->mode_info.num_crtc) {
-        struct amdgpu_mode_mc_save save;
-        amdgpu_display_stop_mc_access(adev, &save);
-        amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+    if (!need_full_reset) {
+        amdgpu_pre_soft_reset(adev);
+        r = amdgpu_soft_reset(adev);
+        amdgpu_post_soft_reset(adev);
+        if (r || amdgpu_check_soft_reset(adev)) {
+            DRM_INFO("soft reset failed, will fallback to full reset!\n");
+            need_full_reset = true;
+        }
     }
 
-    r = amdgpu_asic_reset(adev);
-    /* post card */
-    amdgpu_atom_asic_init(adev->mode_info.atom_context);
+    if (need_full_reset) {
+        /* save scratch */
+        amdgpu_atombios_scratch_regs_save(adev);
+        r = amdgpu_suspend(adev);
 
-    if (!r) {
-        dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-        r = amdgpu_resume(adev);
+retry:
+        /* Disable fb access */
+        if (adev->mode_info.num_crtc) {
+            struct amdgpu_mode_mc_save save;
+            amdgpu_display_stop_mc_access(adev, &save);
+            amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+        }
+
+        r = amdgpu_asic_reset(adev);
+        /* post card */
+        amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+        if (!r) {
+            dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+            r = amdgpu_resume(adev);
+        }
+        /* restore scratch */
+        amdgpu_atombios_scratch_regs_restore(adev);
     }
-    /* restore scratch */
-    amdgpu_atombios_scratch_regs_restore(adev);
     if (!r) {
+        amdgpu_irq_gpu_reset_resume_helper(adev);
         r = amdgpu_ib_ring_tests(adev);
         if (r) {
             dev_err(adev->dev, "ib ring test failed (%d).\n", r);
             r = amdgpu_suspend(adev);
+            need_full_reset = true;
             goto retry;
         }
+        /**
+         * recovery vm page tables, since we cannot depend on VRAM is
+         * consistent after gpu full reset.
+         */
+        if (need_full_reset && amdgpu_need_backup(adev)) {
+            struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+            struct amdgpu_bo *bo, *tmp;
+            struct fence *fence = NULL, *next = NULL;
+
+            DRM_INFO("recover vram bo from shadow\n");
+            mutex_lock(&adev->shadow_list_lock);
+            list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+                amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+                if (fence) {
+                    r = fence_wait(fence, false);
+                    if (r) {
+                        WARN(r, "recovery from shadow isn't comleted\n");
+                        break;
+                    }
+                }
+                fence_put(fence);
+                fence = next;
+            }
+            mutex_unlock(&adev->shadow_list_lock);
+            if (fence) {
+                r = fence_wait(fence, false);
+                if (r)
+                    WARN(r, "recovery from shadow isn't comleted\n");
+            }
+            fence_put(fence);
+        }
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
             struct amdgpu_ring *ring = adev->rings[i];
             if (!ring)
                 continue;
+
+            amd_sched_job_recovery(&ring->sched);
             kthread_unpark(ring->sched.thread);
         }
@@ -2020,7 +2226,6 @@ retry:
         /* bad news, how to tell it to userspace ? */
         dev_info(adev->dev, "GPU reset failed\n");
     }
-    amdgpu_irq_gpu_reset_resume_helper(adev);
 
     return r;
 }
@@ -2178,22 +2383,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
     struct amdgpu_device *adev = f->f_inode->i_private;
     ssize_t result = 0;
     int r;
-    bool use_bank;
+    bool pm_pg_lock, use_bank;
     unsigned instance_bank, sh_bank, se_bank;
 
     if (size & 0x3 || *pos & 0x3)
         return -EINVAL;
 
+    /* are we reading registers for which a PG lock is necessary? */
+    pm_pg_lock = (*pos >> 23) & 1;
+
     if (*pos & (1ULL << 62)) {
         se_bank = (*pos >> 24) & 0x3FF;
         sh_bank = (*pos >> 34) & 0x3FF;
         instance_bank = (*pos >> 44) & 0x3FF;
         use_bank = 1;
-        *pos &= 0xFFFFFF;
     } else {
         use_bank = 0;
     }
 
+    *pos &= 0x3FFFF;
+
     if (use_bank) {
         if (sh_bank >= adev->gfx.config.max_sh_per_se ||
             se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2412,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                     sh_bank, instance_bank);
     }
 
+    if (pm_pg_lock)
+        mutex_lock(&adev->pm.mutex);
+
     while (size) {
         uint32_t value;
 
@@ -2228,6 +2440,9 @@ end:
         mutex_unlock(&adev->grbm_idx_mutex);
     }
 
+    if (pm_pg_lock)
+        mutex_unlock(&adev->pm.mutex);
+
     return result;
 }
@@ -2443,7 +2658,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
         return -ENOMEM;
 
     /* version, increment each time something is added */
-    config[no_regs++] = 0;
+    config[no_regs++] = 2;
     config[no_regs++] = adev->gfx.config.max_shader_engines;
     config[no_regs++] = adev->gfx.config.max_tile_pipes;
     config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2683,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
     config[no_regs++] = adev->gfx.config.gb_addr_config;
     config[no_regs++] = adev->gfx.config.num_rbs;
 
+    /* rev==1 */
+    config[no_regs++] = adev->rev_id;
+    config[no_regs++] = adev->pg_flags;
+    config[no_regs++] = adev->cg_flags;
+
+    /* rev==2 */
+    config[no_regs++] = adev->family;
+    config[no_regs++] = adev->external_rev_id;
+
     while (size && (*pos < no_regs * 4)) {
         uint32_t value;
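The debugfs register interface above packs addressing metadata into the file offset: bit 62 selects explicit SE/SH/instance banking (bits 24, 34 and 44), bit 23 takes the power-gating lock, and the low bits (masked with 0x3FFFF in the kernel) are the register byte offset. A rough sketch of how a userspace reader might build such an offset; the debugfs path, register offset and bank values here are illustrative assumptions:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
        /* path is illustrative; the dri instance number varies */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
        uint64_t se = 0, sh = 0, instance = 0;
        uint32_t reg = 0x2004;       /* hypothetical register dword offset */
        uint32_t value;

        uint64_t pos = (uint64_t)reg << 2;  /* byte offset of the register */
        pos |= 1ULL << 62;                  /* use explicit banking */
        pos |= se << 24 | sh << 34 | instance << 44;
        pos |= 1ULL << 23;                  /* hold the PG lock while reading */

        pread(fd, &value, sizeof(value), pos);
        close(fd);
        return 0;
    }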
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f9602..9af8d3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
         container_of(cb, struct amdgpu_flip_work, cb);
 
     fence_put(f);
-    schedule_work(&work->flip_work);
+    schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
+    struct delayed_work *delayed_work =
+        container_of(__work, struct delayed_work, work);
     struct amdgpu_flip_work *work =
-        container_of(__work, struct amdgpu_flip_work, flip_work);
+        container_of(delayed_work, struct amdgpu_flip_work, flip_work);
     struct amdgpu_device *adev = work->adev;
     struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
     struct drm_crtc *crtc = &amdgpuCrtc->base;
     unsigned long flags;
-    unsigned i, repcnt = 4;
-    int vpos, hpos, stat, min_udelay = 0;
-    struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+    unsigned i;
+    int vpos, hpos;
 
     if (amdgpu_flip_handle_fence(work, &work->excl))
         return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
         if (amdgpu_flip_handle_fence(work, &work->shared[i]))
             return;
 
-    /* We borrow the event spin lock for protecting flip_status */
-    spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-    /* If this happens to execute within the "virtually extended" vblank
-     * interval before the start of the real vblank interval then it needs
-     * to delay programming the mmio flip until the real vblank is entered.
-     * This prevents completing a flip too early due to the way we fudge
-     * our vblank counter and vblank timestamps in order to work around the
-     * problem that the hw fires vblank interrupts before actual start of
-     * vblank (when line buffer refilling is done for a frame). It
-     * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
-     * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
-     *
-     * In practice this won't execute very often unless on very fast
-     * machines because the time window for this to happen is very small.
+    /* Wait until we're out of the vertical blank period before the one
+     * targeted by the flip
      */
-    while (amdgpuCrtc->enabled && --repcnt) {
-        /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-         * start in hpos, and to the "fudged earlier" vblank start in
-         * vpos.
-         */
-        stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
-                          GET_DISTANCE_TO_VBLANKSTART,
-                          &vpos, &hpos, NULL, NULL,
-                          &crtc->hwmode);
-
-        if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-            (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-            !(vpos >= 0 && hpos <= 0))
-            break;
-
-        /* Sleep at least until estimated real start of hw vblank */
-        min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-        if (min_udelay > vblank->framedur_ns / 2000) {
-            /* Don't wait ridiculously long - something is wrong */
-            repcnt = 0;
-            break;
-        }
-        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-        usleep_range(min_udelay, 2 * min_udelay);
-        spin_lock_irqsave(&crtc->dev->event_lock, flags);
+    if (amdgpuCrtc->enabled &&
+        (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+                    &vpos, &hpos, NULL, NULL,
+                    &crtc->hwmode)
+         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+        (int)(work->target_vblank -
+          amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+        schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+        return;
     }
 
-    if (!repcnt)
-        DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-                 "framedur %d, linedur %d, stat %d, vpos %d, "
-                 "hpos %d\n", work->crtc_id, min_udelay,
-                 vblank->framedur_ns / 1000,
-                 vblank->linedur_ns / 1000, stat, vpos, hpos);
+    /* We borrow the event spin lock for protecting flip_status */
+    spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
     /* Do the flip (mmio) */
     adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
     kfree(work);
 }
 
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-              struct drm_framebuffer *fb,
-              struct drm_pending_vblank_event *event,
-              uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                 struct drm_framebuffer *fb,
+                 struct drm_pending_vblank_event *event,
+                 uint32_t page_flip_flags, uint32_t target)
 {
     struct drm_device *dev = crtc->dev;
     struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
     if (work == NULL)
         return -ENOMEM;
 
-    INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+    INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
     INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
 
     work->event = event;
@@ -237,12 +206,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
     amdgpu_bo_unreserve(new_rbo);
 
     work->base = base;
-
-    r = drm_crtc_vblank_get(crtc);
-    if (r) {
-        DRM_ERROR("failed to get vblank before flip\n");
-        goto pflip_cleanup;
-    }
+    work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+        amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
     /* we borrow the event spin lock for protecting flip_wrok */
     spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
         DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
         r = -EBUSY;
-        goto vblank_cleanup;
+        goto pflip_cleanup;
     }
 
     amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
     /* update crtc fb */
     crtc->primary->fb = fb;
     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-    amdgpu_flip_work_func(&work->flip_work);
+    amdgpu_flip_work_func(&work->flip_work.work);
     return 0;
 
-vblank_cleanup:
-    drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
     if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
         DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
     return ret;
 }
 
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
     "NONE",
     "INTERNAL_LVDS",
     "INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
     "TRAVIS",
     "INTERNAL_VCE",
     "INTERNAL_UNIPHY3",
+    "HDMI_ANX9805",
+    "INTERNAL_AMCLK",
+    "VIRTUAL",
 };
 
 static const char *hpd_names[6] = {
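The rework above replaces the old busy-wait with a requeue: while the target vblank is still ahead, the handler reschedules itself 1 ms later instead of spinning. The "still ahead" test uses wraparound-safe signed arithmetic on 32-bit counters; the idiom in isolation, as a standalone sketch:

    #include <stdint.h>

    /* Signed distance between two 32-bit vblank counters, safe across
     * counter wraparound. Positive means 'target' is still in the future. */
    static int vblank_not_reached(uint32_t target, uint32_t current_count)
    {
        return (int32_t)(target - current_count) > 0;
    }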
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 11263c5..7c911d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -53,9 +53,11 @@
  * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
  *           at the end of IBs.
  * - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
  */
 #define KMS_DRIVER_MAJOR    3
-#define KMS_DRIVER_MINOR    3
+#define KMS_DRIVER_MINOR    5
 #define KMS_DRIVER_PATCHLEVEL    0
 
 int amdgpu_vram_limit = 0;
@@ -84,11 +86,13 @@ int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
 int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
 char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +174,9 @@ MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 =
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 #endif
 
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 
@@ -185,6 +192,9 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
     /* Kaveri */
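As the MODULE_PARM_DESC above indicates, the new virtual_display parameter takes a semicolon-separated list of PCI bus addresses. A hypothetical boot-time setting (the addresses here are made up for illustration) would look like:

    amdgpu.virtual_display=0000:01:00.0;0000:02:00.0

Each device whose pci_name() matches an entry then has enable_virtual_display set by amdgpu_whether_enable_virtual_display() (see the amdgpu_device.c hunk above), routing it to the DCE-virtual display path.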
bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the amdgpu bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index a31d7ef..f5810f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -142,7 +142,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } if (!ring->ready) { - dev_err(adev->dev, "couldn't schedule ib\n"); + dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 534fc04..5ebb3f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) /* Allocate ring buffer */ if (adev->irq.ih.ring_obj == NULL) { - r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &adev->irq.ih.ring_obj); + r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->irq.ih.ring_obj, + &adev->irq.ih.gpu_addr, + (void **)&adev->irq.ih.ring); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; } - r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->irq.ih.ring_obj, - AMDGPU_GEM_DOMAIN_GTT, - &adev->irq.ih.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->irq.ih.ring_obj, - (void **)&adev->irq.ih.ring); - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - if (r) { - DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r); - return r; - } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 7ef0935..f016464 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -70,6 +70,7 @@ struct amdgpu_irq { /* gen irq stuff */ struct irq_domain *domain; /* GPU irq controller domain */ unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; + uint32_t srbm_soft_reset; }; void amdgpu_irq_preinstall(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d942654..b78e740 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -292,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 1; break; default: return -EINVAL; @@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file case AMDGPU_INFO_NUM_BYTES_MOVED: ui64 = atomic64_read(&adev->num_bytes_moved); return copy_to_user(out, &ui64, min(size, 8u)) ? 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 7ef0935..f016464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@ struct amdgpu_irq {
     /* gen irq stuff */
     struct irq_domain *domain; /* GPU irq controller domain */
     unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
+    uint32_t srbm_soft_reset;
 };
 
 void amdgpu_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654..b78e740 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -292,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         type = AMD_IP_BLOCK_TYPE_UVD;
         ring_mask = adev->uvd.ring.ready ? 1 : 0;
         ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-        ib_size_alignment = 8;
+        ib_size_alignment = 16;
         break;
     case AMDGPU_HW_IP_VCE:
         type = AMD_IP_BLOCK_TYPE_VCE;
         for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
             ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
         ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-        ib_size_alignment = 8;
+        ib_size_alignment = 1;
         break;
     default:
         return -EINVAL;
@@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     case AMDGPU_INFO_NUM_BYTES_MOVED:
         ui64 = atomic64_read(&adev->num_bytes_moved);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+    case AMDGPU_INFO_NUM_EVICTIONS:
+        ui64 = atomic64_read(&adev->num_evictions);
+        return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_VRAM_USAGE:
         ui64 = atomic64_read(&adev->vram_usage);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
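AMDGPU_INFO_NUM_EVICTIONS is the new INFO-ioctl query behind the 3.4.0 version bump in amdgpu_drv.c above. A minimal sketch of reading it from userspace through the libdrm_amdgpu wrapper, assuming an already-initialized device handle; treat the exact usage as illustrative:

    #include <stdint.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    /* Returns the eviction counter, or 0 on error. */
    static uint64_t query_num_evictions(amdgpu_device_handle dev)
    {
        uint64_t num = 0;

        /* amdgpu_query_info() wraps DRM_IOCTL_AMDGPU_INFO */
        if (amdgpu_query_info(dev, AMDGPU_INFO_NUM_EVICTIONS,
                              sizeof(num), &num))
            return 0;
        return num;
    }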
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d3..7b0eff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
 #include <drm/drm_plane_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
 
 struct amdgpu_bo;
 struct amdgpu_device;
@@ -339,6 +341,8 @@ struct amdgpu_mode_info {
     int num_dig; /* number of dig blocks */
     int disp_priority;
     const struct amdgpu_display_funcs *funcs;
+    struct hrtimer vblank_timer;
+    enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 void amdgpu_print_display_setup(struct drm_device *dev);
 int amdgpu_modeset_create_props(struct amdgpu_device *adev);
 int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-              struct drm_framebuffer *fb,
-              struct drm_pending_vblank_event *event,
-              uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+                 struct drm_framebuffer *fb,
+                 struct drm_pending_vblank_event *event,
+                 uint32_t page_flip_flags, uint32_t target);
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6f0873c..b17734e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -44,14 +44,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev);
 static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                     struct ttm_mem_reg *mem)
 {
-    u64 ret = 0;
-    if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
-        ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
-            adev->mc.visible_vram_size ?
-            adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
-            mem->size;
-    }
-    return ret;
+    if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+        return 0;
+
+    return ((mem->start << PAGE_SHIFT) + mem->size) >
+        adev->mc.visible_vram_size ?
+        adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+        mem->size;
 }
 
 static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +98,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
     drm_gem_object_release(&bo->gem_base);
     amdgpu_bo_unref(&bo->parent);
+    if (!list_empty(&bo->shadow_list)) {
+        mutex_lock(&bo->adev->shadow_list_lock);
+        list_del_init(&bo->shadow_list);
+        mutex_unlock(&bo->adev->shadow_list_lock);
+    }
     kfree(bo->metadata);
     kfree(bo);
 }
@@ -112,84 +116,93 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 
 static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                       struct ttm_placement *placement,
-                      struct ttm_place *placements,
+                      struct ttm_place *places,
                       u32 domain, u64 flags)
 {
-    u32 c = 0, i;
-
-    placement->placement = placements;
-    placement->busy_placement = placements;
+    u32 c = 0;
 
     if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+        unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
         if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
-            adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-            placements[c].fpfn =
-                adev->mc.visible_vram_size >> PAGE_SHIFT;
-            placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+            !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+            adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+            places[c].fpfn = visible_pfn;
+            places[c].lpfn = 0;
+            places[c].flags = TTM_PL_FLAG_WC |
+                TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+                TTM_PL_FLAG_TOPDOWN;
+            c++;
         }
-        placements[c].fpfn = 0;
-        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
             TTM_PL_FLAG_VRAM;
-        if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
-            placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+            places[c].lpfn = visible_pfn;
+        else
+            places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+        c++;
     }
 
     if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-        if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-            placements[c].fpfn = 0;
-            placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_TT;
+        if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+            places[c].flags |= TTM_PL_FLAG_WC |
                 TTM_PL_FLAG_UNCACHED;
-        } else {
-            placements[c].fpfn = 0;
-            placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-        }
+        else
+            places[c].flags |= TTM_PL_FLAG_CACHED;
+        c++;
     }
 
     if (domain & AMDGPU_GEM_DOMAIN_CPU) {
-        if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
-            placements[c].fpfn = 0;
-            placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_SYSTEM;
+        if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+            places[c].flags |= TTM_PL_FLAG_WC |
                 TTM_PL_FLAG_UNCACHED;
-        } else {
-            placements[c].fpfn = 0;
-            placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
-        }
+        else
+            places[c].flags |= TTM_PL_FLAG_CACHED;
+        c++;
     }
 
     if (domain & AMDGPU_GEM_DOMAIN_GDS) {
-        placements[c].fpfn = 0;
-        placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-            AMDGPU_PL_FLAG_GDS;
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+        c++;
     }
+
     if (domain & AMDGPU_GEM_DOMAIN_GWS) {
-        placements[c].fpfn = 0;
-        placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-            AMDGPU_PL_FLAG_GWS;
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+        c++;
     }
+
     if (domain & AMDGPU_GEM_DOMAIN_OA) {
-        placements[c].fpfn = 0;
-        placements[c++].flags = TTM_PL_FLAG_UNCACHED |
-            AMDGPU_PL_FLAG_OA;
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+        c++;
     }
 
     if (!c) {
-        placements[c].fpfn = 0;
-        placements[c++].flags = TTM_PL_MASK_CACHING |
-            TTM_PL_FLAG_SYSTEM;
+        places[c].fpfn = 0;
+        places[c].lpfn = 0;
+        places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+        c++;
     }
+
     placement->num_placement = c;
-    placement->num_busy_placement = c;
+    placement->placement = places;
 
-    for (i = 0; i < c; i++) {
-        if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-            (placements[i].flags & TTM_PL_FLAG_VRAM) &&
-            !placements[i].fpfn)
-            placements[i].lpfn =
-                adev->mc.visible_vram_size >> PAGE_SHIFT;
-        else
-            placements[i].lpfn = 0;
-    }
+    placement->num_busy_placement = c;
+    placement->busy_placement = places;
 }
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
@@ -211,6 +224,69 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
     bo->placement.busy_placement = bo->placements;
 }
 
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+                unsigned long size, int align,
+                u32 domain, struct amdgpu_bo **bo_ptr,
+                u64 *gpu_addr, void **cpu_addr)
+{
+    int r;
+
+    r = amdgpu_bo_create(adev, size, align, true, domain,
+                 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                 NULL, NULL, bo_ptr);
+    if (r) {
+        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+        return r;
+    }
+
+    r = amdgpu_bo_reserve(*bo_ptr, false);
+    if (r) {
+        dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+        goto error_free;
+    }
+
+    r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+    if (r) {
+        dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+        goto error_unreserve;
+    }
+
+    if (cpu_addr) {
+        r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+        if (r) {
+            dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+            goto error_unreserve;
+        }
+    }
+
+    amdgpu_bo_unreserve(*bo_ptr);
+
+    return 0;
+
+error_unreserve:
+    amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+    amdgpu_bo_unref(bo_ptr);
+
+    return r;
+}
+
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                 unsigned long size, int byte_align,
                 bool kernel, u32 domain, u64 flags,
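The amdgpu_ih.c hunk earlier in this diff shows the motivation for amdgpu_bo_create_kernel(): it folds the old create/reserve/pin/kmap sequence into one call. A minimal usage sketch, assuming an amdgpu_device *adev in scope and with error handling trimmed:

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    /* one call replaces amdgpu_bo_create() + reserve + pin + kmap */
    r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT,
                                &bo, &gpu_addr, &cpu_ptr);
    if (r)
        return r;
    /* bo is now pinned: gpu_addr is stable and cpu_ptr is kernel-mapped */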
@@ -250,6 +326,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
     }
     bo->adev = adev;
     INIT_LIST_HEAD(&bo->list);
+    INIT_LIST_HEAD(&bo->shadow_list);
     INIT_LIST_HEAD(&bo->va);
     bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                      AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +354,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
     if (unlikely(r != 0)) {
         return r;
     }
+
+    if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+        bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+        struct fence *fence;
+
+        if (adev->mman.buffer_funcs_ring == NULL ||
+            !adev->mman.buffer_funcs_ring->ready) {
+            r = -EBUSY;
+            goto fail_free;
+        }
+
+        r = amdgpu_bo_reserve(bo, false);
+        if (unlikely(r != 0))
+            goto fail_free;
+
+        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+        if (unlikely(r != 0))
+            goto fail_unreserve;
+
+        amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+        amdgpu_bo_fence(bo, fence, false);
+        amdgpu_bo_unreserve(bo);
+        fence_put(bo->tbo.moving);
+        bo->tbo.moving = fence_get(fence);
+        fence_put(fence);
+    }
     *bo_ptr = bo;
 
     trace_amdgpu_bo_create(bo);
 
     return 0;
+
+fail_unreserve:
+    amdgpu_bo_unreserve(bo);
+fail_free:
+    amdgpu_bo_unref(&bo);
+    return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+                   unsigned long size, int byte_align,
+                   struct amdgpu_bo *bo)
+{
+    struct ttm_placement placement = {0};
+    struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+    int r;
+
+    if (bo->shadow)
+        return 0;
+
+    bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+    memset(&placements, 0,
+           (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+    amdgpu_ttm_placement_init(adev, &placement,
+                  placements, AMDGPU_GEM_DOMAIN_GTT,
+                  AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+    r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+                    AMDGPU_GEM_DOMAIN_GTT,
+                    AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+                    NULL, &placement,
+                    bo->tbo.resv,
+                    &bo->shadow);
+    if (!r) {
+        bo->shadow->parent = amdgpu_bo_ref(bo);
+        mutex_lock(&adev->shadow_list_lock);
+        list_add_tail(&bo->shadow_list, &adev->shadow_list);
+        mutex_unlock(&adev->shadow_list_lock);
+    }
+
+    return r;
 }
 
 int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +438,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 {
     struct ttm_placement placement = {0};
     struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+    int r;
 
     memset(&placements, 0,
            (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +446,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
     amdgpu_ttm_placement_init(adev, &placement,
                   placements, domain, flags);
 
-    return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-                       domain, flags, sg, &placement,
-                       resv, bo_ptr);
+    r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+                    domain, flags, sg, &placement,
+                    resv, bo_ptr);
+    if (r)
+        return r;
+
+    if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+        r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+        if (r)
+            amdgpu_bo_unref(bo_ptr);
+    }
+
+    return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+                   struct amdgpu_ring *ring,
+                   struct amdgpu_bo *bo,
+                   struct reservation_object *resv,
+                   struct fence **fence,
+                   bool direct)
+
+{
+    struct amdgpu_bo *shadow = bo->shadow;
+    uint64_t bo_addr, shadow_addr;
+    int r;
+
+    if (!shadow)
+        return -EINVAL;
+
+    bo_addr = amdgpu_bo_gpu_offset(bo);
+    shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+    r = reservation_object_reserve_shared(bo->tbo.resv);
+    if (r)
+        goto err;
+
+    r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+                   amdgpu_bo_size(bo), resv, fence,
+                   direct);
+    if (!r)
+        amdgpu_bo_fence(bo, *fence, true);
+
+err:
+    return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+                  struct amdgpu_ring *ring,
+                  struct amdgpu_bo *bo,
+                  struct reservation_object *resv,
+                  struct fence **fence,
+                  bool direct)
+
+{
+    struct amdgpu_bo *shadow = bo->shadow;
+    uint64_t bo_addr, shadow_addr;
+    int r;
+
+    if (!shadow)
+        return -EINVAL;
+
+    bo_addr = amdgpu_bo_gpu_offset(bo);
+    shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+    r = reservation_object_reserve_shared(bo->tbo.resv);
+    if (r)
+        goto err;
+
+    r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+                   amdgpu_bo_size(bo), resv, fence,
+                   direct);
+    if (!r)
+        amdgpu_bo_fence(bo, *fence, true);
+
+err:
+    return r;
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
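The two helpers above give GPU reset a way to preserve page tables: before a full ASIC reset the VRAM BO is copied into its GTT shadow, and afterwards amdgpu_recover_vram_from_shadow() (see the amdgpu_device.c hunks above) copies it back. A rough sketch of the round trip, assuming a reserved BO that has a shadow and eliding most fence bookkeeping:

    struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
    struct fence *fence = NULL;
    int r;

    /* before reset: VRAM -> GTT shadow */
    r = amdgpu_bo_backup_to_shadow(adev, ring, bo, bo->tbo.resv,
                                   &fence, true /* direct submit */);
    if (!r) {
        fence_wait(fence, false);
        fence_put(fence);
        fence = NULL;
    }

    /* ... full ASIC reset; VRAM contents are now undefined ... */

    /* after reset: GTT shadow -> VRAM */
    r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, &fence, true);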
amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; if (bo->pin_count) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (domain != amdgpu_mem_type_to_domain(mem_type)) + return -EINVAL; + bo->pin_count++; if (gpu_addr) *gpu_addr = amdgpu_bo_gpu_offset(bo); if (max_offset != 0) { - u64 domain_start; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - domain_start = bo->adev->mc.vram_start; - else - domain_start = bo->adev->mc.gtt_start; + u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; WARN_ON_ONCE(max_offset < (amdgpu_bo_gpu_offset(bo) - domain_start)); } @@ -401,7 +622,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* force to pin into visible video ram */ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && - (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) { + (!max_offset || max_offset > + bo->adev->mc.visible_vram_size)) { if (WARN_ON_ONCE(min_offset > bo->adev->mc.visible_vram_size)) return -EINVAL; @@ -420,19 +642,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - bo->pin_count = 1; - if (gpu_addr != NULL) - *gpu_addr = amdgpu_bo_gpu_offset(bo); - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - bo->adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size += amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size += amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p pin failed\n", bo); + goto error; + } + + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = amdgpu_bo_gpu_offset(bo); + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + bo->adev->vram_pin_size += amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size += amdgpu_bo_size(bo); + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { + bo->adev->gart_pin_size += amdgpu_bo_size(bo); } + +error: return r; } @@ -457,16 +683,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - bo->adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size -= amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); + goto error; + } + + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { + bo->adev->vram_pin_size -= amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); + } else { + bo->adev->gart_pin_size -= amdgpu_bo_size(bo); } + +error: return r; } @@ -637,7 +867,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) for (i = 0; i < abo->placement.num_placement; i++) { /* Force into visible VRAM */ if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn)) + (!abo->placements[i].lpfn || + abo->placements[i].lpfn > lpfn)) abo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &abo->placement, false, false); @@ -674,3 +905,21 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, else reservation_object_add_excl_fence(resv, fence); } + +/** + * amdgpu_bo_gpu_offset - return GPU offset of bo + * @bo: amdgpu 
object for which we query the offset + * + * Returns current GPU offset of the object. + * + * Note: object should either be pinned or reserved when calling this + * function, it might be useful to add check for this for debugging. + */ +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) +{ + WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); + WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && + !bo->pin_count); + + return bo->tbo.offset; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index bdb01d9..b6a2739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -85,21 +85,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) ttm_bo_unreserve(&bo->tbo); } -/** - * amdgpu_bo_gpu_offset - return GPU offset of bo - * @bo: amdgpu object for which we query the offset - * - * Returns current GPU offset of the object. - * - * Note: object should either be pinned or reserved when calling this - * function, it might be useful to add check for this for debugging. - */ -static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) -{ - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); - return bo->tbo.offset; -} - static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) { return bo->tbo.num_pages << PAGE_SHIFT; @@ -139,6 +124,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct ttm_placement *placement, struct reservation_object *resv, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); @@ -165,6 +154,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, bool direct); +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct); + /* * sub allocation diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 5cc7052..d4ec3cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1103,54 +1103,46 @@ force: void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { + /* enable/disable UVD */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_uvd(adev, !enable); - else { - if (adev->pm.funcs->powergate_uvd) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - /* enable/disable UVD */ - amdgpu_dpm_powergate_uvd(adev, !enable); + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - mutex_unlock(&adev->pm.mutex); - } else { - 
mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = false; + mutex_unlock(&adev->pm.mutex); } - + amdgpu_pm_compute_clocks(adev); } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { + /* enable/disable VCE */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_vce(adev, !enable); - else { - if (adev->pm.funcs->powergate_vce) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - amdgpu_dpm_powergate_vce(adev, !enable); + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.vce_active = false; + mutex_unlock(&adev->pm.mutex); } + amdgpu_pm_compute_clocks(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index c5738a22..5450744 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,7 +52,9 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); - pp_init->powercontainment_enabled = amdgpu_powercontainment; + pp_init->rev_id = adev->pdev->revision; + pp_init->sub_sys_id = adev->pdev->subsystem_device; + pp_init->sub_vendor_id = adev->pdev->subsystem_vendor; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); @@ -106,11 +108,10 @@ static int amdgpu_pp_early_init(void *handle) break; case CHIP_TONGA: case CHIP_FIJI: - adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; - break; + case CHIP_TOPAZ: case CHIP_CARRIZO: case CHIP_STONEY: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + adev->pp_enabled = (amdgpu_powerplay == 0) ? 
false : true; break; /* These chips don't have powerplay implemenations */ case CHIP_BONAIRE: @@ -118,7 +119,6 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_KABINI: case CHIP_MULLINS: case CHIP_KAVERI: - case CHIP_TOPAZ: default: adev->pp_enabled = false; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 85aeb0a..242ba04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &ring->ring_obj); + r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; } - r = amdgpu_bo_reserve(ring->ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT, - &ring->gpu_addr); - if (r) { - amdgpu_bo_unreserve(ring->ring_obj); - dev_err(adev->dev, "(%d) ring pin failed\n", r); - return r; - } - r = amdgpu_bo_kmap(ring->ring_obj, - (void **)&ring->ring); - memset((void *)ring->ring, 0, ring->ring_size); - - amdgpu_bo_unreserve(ring->ring_obj); - if (r) { - dev_err(adev->dev, "(%d) ring map failed\n", r); - return r; - } } ring->ptr_mask = (ring->ring_size / 4) - 1; ring->max_dw = max_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 05a53f4..b827c75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9b61c8b..5447973 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,6 +34,7 @@ #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> +#include <ttm/ttm_memory.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include <linux/seq_file.h> @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) ttm_mem_global_release(ref->object); } -static int amdgpu_ttm_global_init(struct amdgpu_device *adev) +int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; @@ -256,10 +257,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, switch (old_mem->mem_type) { case TTM_PL_VRAM: - old_start += adev->mc.vram_start; - break; case TTM_PL_TT: - old_start += adev->mc.gtt_start; + old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -267,10 +266,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, } switch (new_mem->mem_type) { case 
TTM_PL_VRAM: - new_start += adev->mc.vram_start; - break; case TTM_PL_TT: - new_start += adev->mc.gtt_start; + new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -285,7 +282,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ - bo->resv, &fence); + bo->resv, &fence, false); if (r) return r; @@ -335,7 +332,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -368,7 +365,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -435,8 +432,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } @@ -950,6 +946,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->lru[tbo->mem.mem_type]; lru->lru[tbo->mem.mem_type] = &tbo->lru; + while ((++lru)->lru[tbo->mem.mem_type] == res) + lru->lru[tbo->mem.mem_type] = &tbo->lru; return res; } @@ -960,6 +958,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->swap_lru; lru->swap_lru = &tbo->swap; + while ((++lru)->swap_lru == res) + lru->swap_lru = &tbo->swap; return res; } @@ -987,10 +987,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) unsigned i, j; int r; - r = amdgpu_ttm_global_init(adev); - if (r) { - return r; - } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, adev->mman.bo_global_ref.ref.object, @@ -1011,6 +1007,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) lru->swap_lru = &adev->mman.bdev.glob->swap_lru; } + for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) + adev->mman.guard.lru[j] = NULL; + adev->mman.guard.swap_lru = NULL; + adev->mman.initialized = true; r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->mc.real_vram_size >> PAGE_SHIFT); @@ -1151,7 +1151,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence) + struct fence **fence, bool direct_submit) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1195,8 +1195,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); + if (direct_submit) { + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, + NULL, NULL, fence); + job->fence = fence_get(*fence); + if (r) + DRM_ERROR("Error scheduling IBs (%d)\n", r); + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); + if (r) + goto error_free; + } + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence) +{ + struct amdgpu_device *adev = bo->adev; + struct 
amdgpu_job *job; + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + + uint32_t max_bytes, byte_count; + uint64_t dst_offset; + unsigned int num_loops, num_dw; + unsigned int i; + int r; + + byte_count = bo->tbo.num_pages << PAGE_SHIFT; + max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + num_loops = DIV_ROUND_UP(byte_count, max_bytes); + num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* for IB padding */ + while (num_dw & 0x7) + num_dw++; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) + return r; + + if (resv) { + r = amdgpu_sync_resv(adev, &job->sync, resv, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + goto error_free; + } + } + + dst_offset = bo->tbo.mem.start << PAGE_SHIFT; + for (i = 0; i < num_loops; i++) { + uint32_t cur_size_in_bytes = min(byte_count, max_bytes); + + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, + dst_offset, cur_size_in_bytes); + + dst_offset += cur_size_in_bytes; + byte_count -= cur_size_in_bytes; + } + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); r = amdgpu_job_submit(job, ring, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); + AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; @@ -1387,3 +1458,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) #endif } + +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev) +{ + return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h new file mode 100644 index 0000000..72f6bfc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -0,0 +1,80 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
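The fill and copy helpers above size their indirect buffer the same way: one packet per hardware-limited chunk, rounded up to an 8-dword boundary so the ring padding logic has room. A standalone sketch of that sizing arithmetic; FILL_MAX_BYTES and FILL_NUM_DW are illustrative stand-ins for the adev->mman.buffer_funcs fields, not the real SDMA limits:

#include <stdint.h>

#define FILL_MAX_BYTES 0x1FFFF8u  /* stand-in for buffer_funcs->fill_max_bytes */
#define FILL_NUM_DW    7u         /* stand-in for buffer_funcs->fill_num_dw */

static unsigned int fill_ib_size_dw(uint32_t byte_count)
{
        /* DIV_ROUND_UP: one fill packet per FILL_MAX_BYTES-sized chunk */
        unsigned int num_loops = (byte_count + FILL_MAX_BYTES - 1) / FILL_MAX_BYTES;
        unsigned int num_dw = num_loops * FILL_NUM_DW;

        /* round up so the IB can later be padded to an 8-dword boundary */
        while (num_dw & 0x7)
                num_dw++;

        return num_dw;
}

With byte_count = 0x400000 this gives 3 loops and 21 dwords, which the padding loop rounds up to 24.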
+ * + */ + +#ifndef __AMDGPU_TTM_H__ +#define __AMDGPU_TTM_H__ + +#include "gpu_scheduler.h" + +#define AMDGPU_PL_GDS TTM_PL_PRIV0 +#define AMDGPU_PL_GWS TTM_PL_PRIV1 +#define AMDGPU_PL_OA TTM_PL_PRIV2 + +#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 +#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 +#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 + +#define AMDGPU_TTM_LRU_SIZE 20 + +struct amdgpu_mman_lru { + struct list_head *lru[TTM_NUM_MEM_TYPES]; + struct list_head *swap_lru; +}; + +struct amdgpu_mman { + struct ttm_bo_global_ref bo_global_ref; + struct drm_global_reference mem_global_ref; + struct ttm_bo_device bdev; + bool mem_global_referenced; + bool initialized; + +#if defined(CONFIG_DEBUG_FS) + struct dentry *vram; + struct dentry *gtt; +#endif + + /* buffer handling */ + const struct amdgpu_buffer_funcs *buffer_funcs; + struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; + + /* custom LRU management */ + struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; + /* guard for log2_size array, don't add anything in between */ + struct amdgpu_mman_lru guard; +}; + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + struct reservation_object *resv, + struct fence **fence, bool direct_submit); +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence); + +int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8..cc766cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; - r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, &adev->uvd.vcpu_bo); + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, + &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (r) { - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } - - r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, - &adev->uvd.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) UVD map failed\n", r); - return r; - } - - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, @@ -323,7 +298,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.saved_bo) return -ENOMEM; - memcpy(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.saved_bo, ptr, size); return 0; } @@ -340,7 +315,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.cpu_addr; if (adev->uvd.saved_bo != NULL) { - memcpy(ptr, adev->uvd.saved_bo, size); + memcpy_toio(ptr, 
adev->uvd.saved_bo, size); kfree(adev->uvd.saved_bo); adev->uvd.saved_bo = NULL; } else { @@ -349,11 +324,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); + memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + memset_io(ptr, 0, size); } return 0; @@ -843,6 +818,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, return r; break; case mmUVD_ENGINE_CNTL: + case mmUVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", reg); @@ -981,8 +957,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->ptr[3] = addr >> 32; ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); ib->ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib->ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0); + ib->ptr[i+1] = 0; + } ib->length_dw = 16; if (direct) { @@ -1114,15 +1092,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned i, fences, handles = 0; - - fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - ++handles; + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - if (fences == 0 && handles == 0) { + if (fences == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 05865ce..da52af2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -282,8 +282,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(cpu_addr, (adev->vce.fw->data) + offset, - (adev->vce.fw->size) - offset); + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); amdgpu_bo_kunmap(adev->vce.vcpu_bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e642fc..bf56f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -51,19 +51,22 @@ * SI supports 16. */ -/* Special value that no flush is necessary */ -#define AMDGPU_VM_NO_FLUSH (~0ll) - /* Local structure. 
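Both conversions above, the ring buffer in amdgpu_ring.c and the UVD VCPU BO here, lean on the amdgpu_bo_create_kernel() helper declared earlier in amdgpu_object.h, which folds the old create/reserve/pin/kmap dance and its four error paths into one call. A usage sketch per that declaration, as it might sit in driver init code with the usual includes; the function name and GTT domain are arbitrary here:

static int alloc_kernel_buffer(struct amdgpu_device *adev, unsigned long size)
{
        struct amdgpu_bo *bo = NULL;
        u64 gpu_addr;
        void *cpu_addr;
        int r;

        r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &bo, &gpu_addr, &cpu_addr);
        if (r)
                return r;       /* the helper unwinds its own partial state */

        /* bo is now pinned and mapped: gpu_addr for the engine, cpu_addr for us */
        return 0;
}

The companion memcpy() to memcpy_fromio()/memcpy_toio() switch in the UVD suspend/resume paths above is part of the same cleanup: the VCPU BO lives in VRAM, which is mapped as I/O memory, so the plain string ops were never strictly safe there.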
Encapsulate some VM table update parameters to reduce * the number of function parameters */ -struct amdgpu_vm_update_params { +struct amdgpu_pte_update_params { + /* amdgpu device we do this update for */ + struct amdgpu_device *adev; /* address where to copy page table entries from */ uint64_t src; - /* DMA addresses to use for mapping */ - dma_addr_t *pages_addr; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; + /* Function which actually does the update */ + void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, + uint64_t addr, unsigned count, uint32_t incr, + uint32_t flags); + /* indicate update pt or its shadow */ + bool shadow; }; /** @@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, } /** - * amdgpu_vm_update_pages - helper to call the right asic function + * amdgpu_vm_do_set_ptes - helper to call the right asic function * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -480,35 +482,47 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ -static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, - uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, - uint32_t flags) +static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if (vm_update_params->src) { - amdgpu_vm_copy_pte(adev, vm_update_params->ib, - pe, (vm_update_params->src + (addr >> 12) * 8), count); - - } else if (vm_update_params->pages_addr) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, - vm_update_params->pages_addr, - pe, addr, count, incr, flags); - - } else if (count < 3) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, - count, incr, flags); + if (count < 3) { + amdgpu_vm_write_pte(params->adev, params->ib, pe, + addr | flags, count, incr); } else { - amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, + amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, count, incr, flags); } } /** + * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART + * + * @params: see amdgpu_pte_update_params definition + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Traces the parameters and calls the DMA function to copy the PTEs. 
+ */ +static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) +{ + trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); + + amdgpu_vm_copy_pte(params->adev, params->ib, pe, + (params->src + (addr >> 12) * 8), count); +} + +/** * amdgpu_vm_clear_bo - initially clear the page dir/table * * @adev: amdgpu_device pointer @@ -523,12 +537,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; unsigned entries; uint64_t addr; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -546,9 +559,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - vm_update_params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, - 0, 0); + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; + amdgpu_vm_do_set_ptes(¶ms, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); @@ -577,55 +591,41 @@ error: * Look up the physical address of the page that the pte resolves * to and return the pointer for the page table entry. */ -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) +static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - if (pages_addr) { - /* page table offset */ - result = pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - } else { - /* No mapping required */ - result = addr; - } + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); result &= 0xFFFFFFFFFFFFF000ULL; return result; } -/** - * amdgpu_vm_update_pdes - make sure that page directory is valid - * - * @adev: amdgpu_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * - * Allocates new page tables if necessary - * and updates the page directory. - * Returns 0 for success, error for failure. - */ -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + bool shadow) { struct amdgpu_ring *ring; - struct amdgpu_bo *pd = vm->page_directory; - uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); + struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow : + vm->page_directory; + uint64_t pd_addr; uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *fence = NULL; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); + if (!pd) + return 0; + pd_addr = amdgpu_bo_gpu_offset(pd); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
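With amdgpu_vm_do_set_ptes() and amdgpu_vm_do_copy_ptes() in place, callers pick the update mechanism once by storing it in params->func instead of re-branching on src/pages_addr for every range. A minimal userspace sketch of that dispatch shape, with all types illustrative:

#include <stdint.h>
#include <stdio.h>

struct pte_params;

typedef void (*pte_fn)(struct pte_params *p, uint64_t pe, uint64_t addr,
                       unsigned count);

struct pte_params {
        uint64_t src;   /* GART source when copying, unused when setting */
        pte_fn func;    /* chosen once per job */
};

static void do_set(struct pte_params *p, uint64_t pe, uint64_t addr,
                   unsigned count)
{
        printf("set  pe=%#llx addr=%#llx n=%u\n",
               (unsigned long long)pe, (unsigned long long)addr, count);
}

static void do_copy(struct pte_params *p, uint64_t pe, uint64_t addr,
                    unsigned count)
{
        /* mirrors params->src + (addr >> 12) * 8 in the patch above */
        printf("copy pe=%#llx from=%#llx n=%u\n", (unsigned long long)pe,
               (unsigned long long)(p->src + (addr >> 12) * 8), count);
}

int main(void)
{
        struct pte_params p = { .src = 0x100000, .func = do_copy };

        p.func(&p, 0x8000, 0x42000, 4);  /* no per-call branching */
        p.func = do_set;
        p.func(&p, 0x8000, 0x42000, 4);
        return 0;
}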
*/ @@ -638,7 +638,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - vm_update_params.ib = &job->ibs[0]; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -649,19 +651,25 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, continue; pt = amdgpu_bo_gpu_offset(bo); - if (vm->page_tables[pt_idx].addr == pt) - continue; - vm->page_tables[pt_idx].addr = pt; + if (!shadow) { + if (vm->page_tables[pt_idx].addr == pt) + continue; + vm->page_tables[pt_idx].addr = pt; + } else { + if (vm->page_tables[pt_idx].shadow_addr == pt) + continue; + vm->page_tables[pt_idx].shadow_addr = pt; + } pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || - ((last_pt + incr * count) != pt)) { + ((last_pt + incr * count) != pt) || + (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, - AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, + last_pt, count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -673,15 +681,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); - if (vm_update_params.ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, vm_update_params.ib); + if (params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(vm_update_params.ib->length_dw > ndw); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -703,92 +710,33 @@ error_free: return r; } -/** - * amdgpu_vm_frag_ptes - add fragment information to PTEs +/* + * amdgpu_vm_update_pdes - make sure that page directory is valid * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition - * @pe_start: first PTE to handle - * @pe_end: last PTE to handle - * @addr: addr those PTEs should point to - * @flags: hw mapping flags + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * + * Allocates new page tables if necessary + * and updates the page directory. + * Returns 0 for success, error for failure. */ -static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, - uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags) +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm) { - /** - * The MC L1 TLB supports variable sized pages, based on a fragment - * field in the PTE. When this field is set to a non-zero value, page - * granularity is increased from 4KB to (1 << (12 + frag)). The PTE - * flags are considered valid for all PTEs within the fragment range - * and corresponding mappings are assumed to be physically contiguous. - * - * The L1 TLB can store a single PTE for the whole fragment, - * significantly increasing the space available for translation - * caching. This leads to large improvements in throughput when the - * TLB is under pressure. - * - * The L2 TLB distributes small and large fragments into two - * asymmetric partitions. The large fragment cache is significantly - * larger. 
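amdgpu_vm_map_gart() above looks like it ORs in a sub-page offset only to mask it straight back off; the point is CPU pages larger than the 4K GPU pages. With 64K CPU pages, bits 12-15 of the offset survive the final 4K mask and select the GPU page inside the CPU page. A worked userspace sketch, assuming a 64K CPU page size purely for illustration:

#include <stdint.h>
#include <assert.h>

#define CPU_PAGE_SHIFT 16                        /* 64K CPU pages, illustrative */
#define CPU_PAGE_MASK  (~((1ull << CPU_PAGE_SHIFT) - 1))

static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
        uint64_t result = pages_addr[addr >> CPU_PAGE_SHIFT]; /* DMA addr of page */

        result |= addr & ~CPU_PAGE_MASK;          /* offset within the CPU page */
        return result & 0xFFFFFFFFFFFFF000ull;    /* GPU PTEs are 4K aligned */
}

int main(void)
{
        uint64_t pages[1] = { 0xAB0000 };

        /* 0x3456 into the 64K page: the 4K-page index (0x3) survives the mask */
        assert(map_gart(pages, 0x3456) == 0xAB3000);
        return 0;
}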
Thus, we try to use large fragments wherever possible. - * Userspace can support this by aligning virtual base address and - * allocation size to the fragment size. - */ - - /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; - uint64_t frag_align = 0x80; - - uint64_t frag_start = ALIGN(pe_start, frag_align); - uint64_t frag_end = pe_end & ~(frag_align - 1); - - unsigned count; - - /* Abort early if there isn't anything to do */ - if (pe_start == pe_end) - return; - - /* system pages are non continuously */ - if (vm_update_params->src || vm_update_params->pages_addr || - !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { - - count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, - addr, count, AMDGPU_GPU_PAGE_SIZE, - flags); - return; - } - - /* handle the 4K area at the beginning */ - if (pe_start != frag_start) { - count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - addr += AMDGPU_GPU_PAGE_SIZE * count; - } - - /* handle the area in the middle */ - count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); + int r; - /* handle the 4K area at the end */ - if (frag_end != pe_end) { - addr += AMDGPU_GPU_PAGE_SIZE * count; - count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - } + r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); + if (r) + return r; + return amdgpu_vm_update_pd_or_shadow(adev, vm, false); } /** * amdgpu_vm_update_ptes - make sure that page tables are valid * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range @@ -797,16 +745,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * * Update the page tables in the range @start - @end. 
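amdgpu_vm_update_page_directory() above now runs the same update twice, shadow first and then the real page directory, with amdgpu_vm_update_pd_or_shadow() returning 0 early when no shadow BO exists. A minimal sketch of that two-pass pattern with stand-in types; per the shadow creation earlier in this series, the shadow lives in GTT while the directory the GPU uses lives in VRAM:

#include <stdbool.h>

struct pd { bool has_shadow; };

static int update_one(struct pd *pd, bool shadow)
{
        if (shadow && !pd->has_shadow)
                return 0;               /* nothing to mirror, not an error */
        /* ... build and submit the update job for this copy ... */
        return 0;
}

static int update_page_directory(struct pd *pd)
{
        int r = update_one(pd, true);   /* keep the GTT shadow current first */

        if (r)
                return r;
        return update_one(pd, false);   /* then the copy the GPU walks */
}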
*/ -static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, +static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t cur_pe_start, cur_nptes, cur_dst; uint64_t addr; /* next GPU address to be updated */ uint64_t pt_idx; struct amdgpu_bo *pt; @@ -817,7 +763,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, addr = start; pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; - + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else @@ -825,7 +775,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, cur_pe_start = amdgpu_bo_gpu_offset(pt); cur_pe_start += (addr & mask) * 8; - cur_pe_end = cur_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; /* for next ptb*/ @@ -836,6 +786,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, while (addr < end) { pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -845,19 +800,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; - if (cur_pe_end == next_pe_start) { + if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && + ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { /* The next ptb is consecutive to current ptb. - * Don't call amdgpu_vm_frag_ptes now. + * Don't call the update function now. * Will update two ptbs together in future. */ - cur_pe_end += 8 * nptes; + cur_nptes += nptes; } else { - amdgpu_vm_frag_ptes(adev, vm_update_params, - cur_pe_start, cur_pe_end, - cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); cur_pe_start = next_pe_start; - cur_pe_end = next_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; } @@ -866,8 +821,79 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, - cur_pe_end, cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); +} + +/* + * amdgpu_vm_frag_ptes - add fragment information to PTEs + * + * @params: see amdgpu_pte_update_params definition + * @vm: requested vm + * @start: first PTE to handle + * @end: last PTE to handle + * @dst: addr those PTEs should point to + * @flags: hw mapping flags + */ +static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, + struct amdgpu_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) +{ + /** + * The MC L1 TLB supports variable sized pages, based on a fragment + * field in the PTE. When this field is set to a non-zero value, page + * granularity is increased from 4KB to (1 << (12 + frag)). The PTE + * flags are considered valid for all PTEs within the fragment range + * and corresponding mappings are assumed to be physically contiguous. + * + * The L1 TLB can store a single PTE for the whole fragment, + * significantly increasing the space available for translation + * caching. 
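amdgpu_vm_update_ptes() above coalesces physically consecutive page-table windows into a single params->func call, but only while the merged run stays under AMDGPU_VM_MAX_UPDATE_SIZE. A standalone sketch of that run-merging over a plain array; the limit here is an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

#define MAX_MERGED 4  /* stand-in for AMDGPU_VM_MAX_UPDATE_SIZE */

/* merge touching (pe, nptes) windows; flush on a gap or when the run
 * would exceed the per-call limit */
static void merge_windows(const uint64_t *pe, const unsigned *nptes, unsigned n)
{
        uint64_t cur_pe = pe[0];
        unsigned cur_n = nptes[0], i;

        for (i = 1; i < n; ++i) {
                if (cur_pe + 8ull * cur_n == pe[i] &&
                    cur_n + nptes[i] <= MAX_MERGED) {
                        cur_n += nptes[i];      /* windows touch: extend */
                        continue;
                }
                printf("flush pe=%#llx n=%u\n", (unsigned long long)cur_pe, cur_n);
                cur_pe = pe[i];
                cur_n = nptes[i];
        }
        printf("flush pe=%#llx n=%u\n", (unsigned long long)cur_pe, cur_n);
}

int main(void)
{
        const uint64_t pe[] = { 0x1000, 0x1010, 0x2000 };
        const unsigned nptes[] = { 2, 2, 1 };

        merge_windows(pe, nptes, 3);  /* first two merge: 0x1000 + 2*8 == 0x1010 */
        return 0;
}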
This leads to large improvements in throughput when the + * TLB is under pressure. + * + * The L2 TLB distributes small and large fragments into two + * asymmetric partitions. The large fragment cache is significantly + * larger. Thus, we try to use large fragments wherever possible. + * Userspace can support this by aligning virtual base address and + * allocation size to the fragment size. + */ + + const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + + uint64_t frag_start = ALIGN(start, frag_align); + uint64_t frag_end = end & ~(frag_align - 1); + + uint32_t frag; + + /* system pages are non continuously */ + if (params->src || !(flags & AMDGPU_PTE_VALID) || + (frag_start >= frag_end)) { + + amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); + return; + } + + /* use more than 64KB fragment size if possible */ + frag = lower_32_bits(frag_start | frag_end); + frag = likely(frag) ? __ffs(frag) : 31; + + /* handle the 4K area at the beginning */ + if (start != frag_start) { + amdgpu_vm_update_ptes(params, vm, start, frag_start, + dst, flags); + dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; + } + + /* handle the area in the middle */ + amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, + flags | AMDGPU_PTE_FRAG(frag)); + + /* handle the 4K area at the end */ + if (frag_end != end) { + dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; + amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); + } } /** @@ -900,14 +926,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *f = NULL; int r; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.src = src; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - memset(&vm_update_params, 0, sizeof(vm_update_params)); - vm_update_params.src = src; - vm_update_params.pages_addr = pages_addr; + + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.src = src; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -924,30 +955,52 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if (vm_update_params.src) { + if (src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (vm_update_params.pages_addr) { - /* header for write data commands */ - ndw += ncmds * 4; + params.func = amdgpu_vm_do_copy_ptes; + + } else if (pages_addr) { + /* copy commands needed */ + ndw += ncmds * 7; - /* body of write data command */ + /* and also PTEs */ ndw += nptes * 2; + params.func = amdgpu_vm_do_copy_ptes; + } else { /* set page commands needed */ ndw += ncmds * 10; /* two extra commands for begin/end of fragment */ ndw += 2 * 10; + + params.func = amdgpu_vm_do_set_ptes; } r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; - vm_update_params.ib = &job->ibs[0]; + params.ib = &job->ibs[0]; + + if (!src && pages_addr) { + uint64_t *pte; + unsigned i; + + /* Put the PTEs at the end of the IB. 
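The fragment pick in amdgpu_vm_frag_ptes() above chooses the largest power-of-two fragment that both frag_start and frag_end are aligned to, by taking the lowest set bit of their OR, with 31 as the fallback when the lower 32 bits are all zero. A standalone sketch of the same computation; __builtin_ctz stands in for the kernel's __ffs():

#include <stdint.h>
#include <assert.h>

static uint32_t frag_bits(uint64_t frag_start, uint64_t frag_end)
{
        uint32_t frag = (uint32_t)(frag_start | frag_end); /* lower_32_bits */

        /* lowest set bit = common alignment of both boundaries */
        return frag ? (uint32_t)__builtin_ctz(frag) : 31;
}

int main(void)
{
        assert(frag_bits(0x80, 0x100) == 7);   /* both aligned to 2^7 entries */
        assert(frag_bits(0x200, 0x400) == 9);  /* larger alignment, larger frag */
        return 0;
}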
*/ + i = ndw - nptes * 2; + pte= (uint64_t *)&(job->ibs->ptr[i]); + params.src = job->ibs->gpu_addr + i * 4; + + for (i = 0; i < nptes; ++i) { + pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * + AMDGPU_GPU_PAGE_SIZE); + pte[i] |= flags; + } + } r = amdgpu_sync_fence(adev, &job->sync, exclusive); if (r) @@ -962,11 +1015,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, - last + 1, addr, flags); + params.shadow = true; + amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); + params.shadow = false; + amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, vm_update_params.ib); - WARN_ON(vm_update_params.ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, params.ib); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) @@ -1062,28 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object - * @mem: ttm mem + * @clear: if true clear the entries * * Fill in the page table entries for @bo_va. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem) + bool clear) { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct ttm_mem_reg *mem; struct fence *exclusive; uint64_t addr; int r; - if (mem) { + if (clear) { + mem = NULL; + addr = 0; + exclusive = NULL; + } else { struct ttm_dma_tt *ttm; + mem = &bo_va->bo->tbo.mem; addr = (u64)mem->start << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_TT: @@ -1101,9 +1160,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); - } else { - addr = 0; - exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1134,7 +1190,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); - if (!mem) + if (clear) list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); @@ -1197,7 +1253,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, true); if (r) return r; @@ -1342,7 +1398,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, resv, &pt); if (r) goto error_free; @@ -1541,7 +1598,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, NULL, &vm->page_directory); if (r) goto error_free_sched_entity; @@ -1597,10 +1655,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) kfree(mapping); } - for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) + for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { + if (vm->page_tables[i].entry.robj && + 
vm->page_tables[i].entry.robj->shadow) + amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow); amdgpu_bo_unref(&vm->page_tables[i].entry.robj); + } drm_free_large(vm->page_tables); + if (vm->page_directory->shadow) + amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 7f85c2c..f81068b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, /* timeout */ if (args.v2.ucReplyStatus == 1) { - DRM_DEBUG_KMS("dp_aux_ch timeout\n"); r = -ETIMEDOUT; goto done; } @@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) { struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, + msg, DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - amdgpu_atombios_dp_probe_oui(amdgpu_connector); + amdgpu_atombios_dp_probe_oui(amdgpu_connector); - return 0; - } + return 0; } + dig_connector->dpcd[0] = 0; return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a5c94b4..a0d63a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5874,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->pcie_dpm_key_disabled = 0; pi->thermal_sclk_dpm_enabled = 0; - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; pi->mclk_strobe_mode_threshold = 40000; pi->mclk_stutter_mode_threshold = 40000; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4efc901..825de80 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -67,6 +67,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_powerplay.h" +#include "dce_virtual.h" /* * Indirect registers accessor @@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
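In amdgpu_vm_bo_update_mapping() above, when there is no GART window (src == 0) but pages_addr is set, the PTE values are built CPU-side in the tail of the IB itself and params.src is pointed at their GPU address, so the generic copy path handles them. A sketch of carving that scratch table out of a command buffer; every name here is illustrative:

#include <stdint.h>

/* ib_ptr/ib_gpu_addr describe an indirect buffer of ndw dwords; the last
 * nptes * 2 dwords are reserved as a scratch PTE table */
static uint64_t stage_ptes(uint32_t *ib_ptr, uint64_t ib_gpu_addr,
                           unsigned ndw, unsigned nptes,
                           const uint64_t *phys, uint64_t flags)
{
        unsigned i = ndw - nptes * 2;            /* 2 dwords per 64-bit PTE */
        uint64_t *pte = (uint64_t *)&ib_ptr[i];
        unsigned n;

        for (n = 0; n < nptes; ++n)
                pte[n] = phys[n] | flags;        /* value the copy will fetch */

        return ib_gpu_addr + i * 4;              /* GPU address of the table */
}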
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 3, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kabini_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version mullins_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
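The *_ip_blocks_vd tables in this hunk repeat their non-vd twins verbatim except for the DCE entry, which keeps each ASIC's DCE major/minor (8.2, 8.5, 8.3, 8.1) but routes to dce_virtual_ip_funcs so headless or virtualized setups still get a display block. Condensed, the selection they feed (taken from the cik_set_ip_blocks() hunk below, Bonaire case shown):

if (adev->enable_virtual_display) {
        adev->ip_blocks = bonaire_ip_blocks_vd;         /* virtual DCE */
        adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
} else {
        adev->ip_blocks = bonaire_ip_blocks;            /* real DCE v8.2 */
        adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
}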
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + int cik_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->ip_blocks = bonaire_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); - break; - case CHIP_HAWAII: - adev->ip_blocks = hawaii_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); - break; - case CHIP_KAVERI: - adev->ip_blocks = kaveri_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); - break; - case CHIP_KABINI: - adev->ip_blocks = kabini_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); - break; - case CHIP_MULLINS: - adev->ip_blocks = mullins_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ee64669..e71cd12 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -694,24 +694,16 @@ static void 
cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -719,39 +711,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -767,40 +747,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). 
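+ * Splitting overly large updates is now left to the caller; a single GENERATE_PTE_PDE packet generates all @count entries in hardware, stepping the value from @addr by @incr per entry.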
*/ -static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 2a11413..794c5f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -435,7 +435,11 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->caps_td_ramping = true; pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->voting_clients = 0x00c00033; pi->auto_thermal_throttling_enabled = true; pi->bapm_enabled = false; @@ -2108,29 +2112,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) /* disable clockgating so we can properly shut down the block */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); + return; + } + /* shutdown the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - /* XXX: check for errors */ + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) + if (pi->caps_uvd_pg) { /* power off the UVD block */ - cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); + return; + } + } } else { if (pi->caps_uvd_pg) { /* power on the UVD block */ if (pi->uvd_dynamic_pg) - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); else - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerON message\n"); + return; + } + /* re-init the UVD
block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); + return; + } + /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); - /* XXX: check for errors */ + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 741da36..bc5bb4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -646,8 +646,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, if (save->crtc_enabled[i]) { tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); @@ -712,6 +712,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + num_crtc = 6; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v10_0_disable_dce(struct amdgpu_device *adev) +{ + /* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v10_0_set_vga_render_state(adev, false); + + /* Disable the CRTCs */ + for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2277,8 +2316,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2700,7 +2739,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .gamma_set = dce_v10_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2964,10 +3003,11 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_display_funcs(adev); dce_v10_0_set_irq_funcs(adev); + adev->mode_info.num_crtc =
dce_v10_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: - adev->mode_info.num_crtc = 6; /* XXX 7??? */ adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; @@ -3143,11 +3183,26 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } +static int dce_v10_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (dce_v10_0_is_display_hung(adev)) + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; + else + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; + + return 0; +} + static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) + return 0; + if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3514,6 +3569,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = { .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, .wait_for_idle = dce_v10_0_wait_for_idle, + .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index 1bfa48d..e3dc04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v10_0_ip_funcs; +void dce_v10_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 2282eb6..b93eba0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -673,6 +673,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_CARRIZO: + num_crtc = 3; + break; + case CHIP_STONEY: + num_crtc = 2; + break; + case CHIP_POLARIS10: + num_crtc = 6; + break; + case CHIP_POLARIS11: + num_crtc = 5; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v11_0_disable_dce(struct amdgpu_device *adev) +{ + /* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v11_0_set_vga_render_state(adev, false); + + /* Disable the CRTCs */ + for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2252,8 +2299,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + 
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2710,7 +2757,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .gamma_set = dce_v11_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -3001,24 +3048,22 @@ static int dce_v11_0_early_init(void *handle) dce_v11_0_set_display_funcs(adev); dce_v11_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_STONEY: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_POLARIS10: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_POLARIS11: - adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 84e4618..1f58a65 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v11_0_ip_funcs; +void dce_v11_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 8b7ad34..abd5213 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -604,6 +604,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + num_crtc = 6; + break; + case CHIP_KAVERI: + num_crtc = 4; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + num_crtc = 2; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v8_0_disable_dce(struct amdgpu_device *adev) +{ + /* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v8_0_set_vga_render_state(adev, false); + + /* Disable the CRTCs */ + for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v8_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -1501,13 +1547,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->format == eld_reg_to_type[i][1]) { if (sad->channels > max_channels) { - value = (sad->channels << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | - (sad->byte2 << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | - (sad->freq << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); - max_channels =
sad->channels; + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); + max_channels = sad->channels; } if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) @@ -1613,7 +1659,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint32_t offset = dig->afmt->offset; - WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT)); WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz); WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); @@ -1693,6 +1739,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; + offset = dig->afmt->offset; /* hdmi deep color mode general control packets setup, if bpc > 8 */ @@ -1817,7 +1864,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset, HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */ - HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */ + HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset, (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */ @@ -1826,13 +1873,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset, AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */ - /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001); - /* enable audio after to setting up hw */ + /* enable audio after setting up hw */ dce_v8_0_audio_enable(adev, dig->afmt->pin, true); } @@ -2000,7 +2046,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | - (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif @@ -2139,8 +2185,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2554,7 +2600,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .gamma_set = dce_v8_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, - .page_flip = 
amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2655,7 +2701,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ @@ -2805,21 +2851,20 @@ static int dce_v8_0_early_init(void *handle) dce_v8_0_set_display_funcs(adev); dce_v8_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_KAVERI: - adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_KABINI: case CHIP_MULLINS: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; /* ? */ break; @@ -3238,7 +3283,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, drm_handle_vblank(adev->ddev, crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - break; case 1: /* vline */ if (disp_int & interrupt_status_offsets[crtc].vline) @@ -3247,7 +3291,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); DRM_DEBUG("IH: D%d vline\n", crtc + 1); - break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 7701685..7d0770c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v8_0_ip_funcs; +void dce_v8_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c new file mode 100644 index 0000000..00663a7b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -0,0 +1,806 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "dce_v8_0.h" +#endif +#include "dce_v10_0.h" +#include "dce_v11_0.h" +#include "dce_virtual.h" + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + +/** + * dce_virtual_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (a no-op for the virtual display). + */ +static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + return; +} + +static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + return 0; +} + +static void dce_virtual_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + return; +} + +static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + *vbl = 0; + *position = 0; + + return -EINVAL; +} + +static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return true; +} + +static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return; +} + +static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return 0; +} + +static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) +{ + return false; +} + +void dce_virtual_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#ifdef CONFIG_DRM_AMDGPU_CIK + dce_v8_0_disable_dce(adev); +#endif + break; + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: + /* no DCE */ + return; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } + + return; +} + +void dce_virtual_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + return; +} + +void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + return; +} + +/** + * dce_virtual_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK).
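+ * There is no real display hardware behind the virtual DCE, so this is a stub.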
+ */ +static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) +{ + return; +} + +static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, + u16 *green, u16 *blue, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int i; + + /* userspace palettes are always correct as is */ + for (i = 0; i < size; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + + return 0; +} + +static void dce_virtual_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { + .cursor_set2 = NULL, + .cursor_move = NULL, + .gamma_set = dce_virtual_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_virtual_crtc_destroy, + .page_flip_target = amdgpu_crtc_page_flip_target, +}; + +static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + /* Make sure VBLANK and PFLIP interrupts are still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); + drm_vblank_on(dev, amdgpu_crtc->crtc_id); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_off(dev, amdgpu_crtc->crtc_id); + amdgpu_crtc->enabled = false; + break; + } +} + +static void dce_virtual_crtc_prepare(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_virtual_crtc_commit(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void dce_virtual_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + /* update the hw version for dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector =
amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + + return true; +} + + +static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc) +{ + return; +} + +static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return 0; +} + +static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { + .dpms = dce_virtual_crtc_dpms, + .mode_fixup = dce_virtual_crtc_mode_fixup, + .mode_set = dce_virtual_crtc_mode_set, + .mode_set_base = dce_virtual_crtc_set_base, + .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, + .prepare = dce_virtual_crtc_prepare, + .commit = dce_virtual_crtc_commit, + .load_lut = dce_virtual_crtc_load_lut, + .disable = dce_virtual_crtc_disable, +}; + +static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + adev->mode_info.crtcs[index] = amdgpu_crtc; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); + + return 0; +} + +static int dce_virtual_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + dce_virtual_set_display_funcs(adev); + dce_virtual_set_irq_funcs(adev); + + adev->mode_info.num_crtc = 1; + adev->mode_info.num_hpd = 1; + adev->mode_info.num_dig = 1; + return 0; +} + +static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) +{ + struct amdgpu_i2c_bus_rec ddc_bus; + struct amdgpu_router router; + struct amdgpu_hpd hpd; + + /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = AMDGPU_HPD_NONE; + /* needed for aux chan transactions */ + ddc_bus.hpd = hpd.hpd; + + memset(&router, 0, sizeof(router)); + router.ddc_valid = false; + router.cd_valid = false; + amdgpu_display_add_connector(adev, + 0, + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, + CONNECTOR_OBJECT_ID_VIRTUAL, + &hpd, + &router); + + amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, + ATOM_DEVICE_CRT1_SUPPORT, + 0); + + amdgpu_link_encoder_connector(adev->ddev); + + return true; +} + +static int dce_virtual_sw_init(void *handle) +{ + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq); + if (r) + return r; + + adev->ddev->max_vblank_count = 0; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + 
adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_virtual_crtc_init(adev, i); + if (r) + return r; + } + + dce_virtual_get_connector_info(adev); + amdgpu_print_display_setup(adev->ddev); + + drm_kms_helper_poll_init(adev->ddev); + + adev->mode_info.mode_config_initialized = true; + return 0; +} + +static int dce_virtual_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + return 0; +} + +static int dce_virtual_hw_init(void *handle) +{ + return 0; +} + +static int dce_virtual_hw_fini(void *handle) +{ + return 0; +} + +static int dce_virtual_suspend(void *handle) +{ + return dce_virtual_hw_fini(handle); +} + +static int dce_virtual_resume(void *handle) +{ + int ret; + + ret = dce_virtual_hw_init(handle); + + return ret; +} + +static bool dce_virtual_is_idle(void *handle) +{ + return true; +} + +static int dce_virtual_wait_for_idle(void *handle) +{ + return 0; +} + +static int dce_virtual_soft_reset(void *handle) +{ + return 0; +} + +static int dce_virtual_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dce_virtual_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs dce_virtual_ip_funcs = { + .name = "dce_virtual", + .early_init = dce_virtual_early_init, + .late_init = NULL, + .sw_init = dce_virtual_sw_init, + .sw_fini = dce_virtual_sw_fini, + .hw_init = dce_virtual_hw_init, + .hw_fini = dce_virtual_hw_fini, + .suspend = dce_virtual_suspend, + .resume = dce_virtual_resume, + .is_idle = dce_virtual_is_idle, + .wait_for_idle = dce_virtual_wait_for_idle, + .soft_reset = dce_virtual_soft_reset, + .set_clockgating_state = dce_virtual_set_clockgating_state, + .set_powergating_state = dce_virtual_set_powergating_state, +}; + +/* these are handled by the primary encoders */ +static void dce_virtual_encoder_prepare(struct drm_encoder *encoder) +{ + return; +} + +static void dce_virtual_encoder_commit(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return; +} + +static void dce_virtual_encoder_disable(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + return; +} + +static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + /* set the active encoder to connector routing */ + amdgpu_encoder_set_active_device(encoder); + + return true; +} + +static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = { + .dpms = dce_virtual_encoder_dpms, + .mode_fixup = dce_virtual_encoder_mode_fixup, + .prepare = dce_virtual_encoder_prepare, + .mode_set = dce_virtual_encoder_mode_set, + .commit = dce_virtual_encoder_commit, + .disable = dce_virtual_encoder_disable, +}; + +static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = 
to_amdgpu_encoder(encoder); + + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { + .destroy = dce_virtual_encoder_destroy, +}; + +static void dce_virtual_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + encoder->possible_crtcs = 0x1; + amdgpu_encoder->enc_priv = NULL; + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); + DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); +} + +static const struct amdgpu_display_funcs dce_virtual_display_funcs = { + .set_vga_render_state = &dce_virtual_set_vga_render_state, + .bandwidth_update = &dce_virtual_bandwidth_update, + .vblank_get_counter = &dce_virtual_vblank_get_counter, + .vblank_wait = &dce_virtual_vblank_wait, + .is_display_hung = &dce_virtual_is_display_hung, + .backlight_set_level = NULL, + .backlight_get_level = NULL, + .hpd_sense = &dce_virtual_hpd_sense, + .hpd_set_polarity = &dce_virtual_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, + .page_flip = &dce_virtual_page_flip, + .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, + .add_encoder = &dce_virtual_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_virtual_stop_mc_access, + .resume_mc_access = &dce_virtual_resume_mc_access, +}; + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_virtual_display_funcs; +} + +static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) +{ + struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info, vblank_timer); + struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device, mode_info); + unsigned crtc = 0; + drm_handle_vblank(adev->ddev, crtc); + dce_virtual_pageflip_irq(adev, NULL, NULL); + hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + if (state && !adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Enable software vsync timer\n"); + hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + 
hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); + adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; + hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + } else if (!state && adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Disable software vsync timer\n"); + hrtimer_cancel(&adev->mode_info.vblank_timer); + } + + adev->mode_info.vsync_timer_enabled = state; + DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); +} + + +static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + default: + break; + } + return 0; +} + +static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, + int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } +} + +static int dce_virtual_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = 0; + unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; + + dce_virtual_crtc_vblank_int_ack(adev, crtc); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + dce_virtual_pageflip_irq(adev, NULL, NULL); + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + return 0; +} + +static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + if (type >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); + + return 0; +} + +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned long flags; + unsigned crtc_id = 0; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = 0; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + if (crtc_id >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. 
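signal the flip event, unpin the old buffer and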
clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wake up userspace */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); + + return 0; +} + +static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { + .set = dce_virtual_set_crtc_irq_state, + .process = dce_virtual_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { + .set = dce_virtual_set_pageflip_irq_state, + .process = dce_virtual_pageflip_irq, +}; + +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h new file mode 100644 index 0000000..e239243 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h @@ -0,0 +1,31 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#ifndef __DCE_VIRTUAL_H__ +#define __DCE_VIRTUAL_H__ + +extern const struct amd_ip_funcs dce_virtual_ip_funcs; +#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d869d05..f4fbec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4465,24 +4465,21 @@ static int gfx_v7_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, - NULL, NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b818461..c6a63c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -703,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); - if (adev->pdev->revision == 0xc7) { + if (adev->pdev->revision == 0xc7 && + ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || + (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) || + (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) { amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); } @@ -1233,10 +1236,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) if (adev->gfx.rlc.clear_state_obj) { r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } @@ -1248,7 +1250,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); adev->gfx.rlc.cp_table_obj = NULL; } @@ -1290,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.clear_state_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void 
**)&adev->gfx.rlc.cs_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } @@ -1332,7 +1333,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.cp_table_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -1345,7 +1346,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - } return 0; @@ -1361,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); adev->gfx.mec.hpd_eop_obj = NULL; } @@ -2082,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, NULL, - NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, NULL, - NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, NULL, - NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; @@ -2127,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v8_0_mec_fini(adev); - gfx_v8_0_rlc_fini(adev); - gfx_v8_0_free_microcode(adev); return 0; @@ -3465,19 +3459,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32(mmGRBM_GFX_INDEX, data); } @@ -3490,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_RB_BACKEND_DISABLE); - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = 
RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se); @@ -3576,16 +3566,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) u32 tmp; int i; - tmp = RREG32(mmGRBM_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); - WREG32(mmGRBM_CNTL, tmp); - + WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF); WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev); gfx_v8_0_get_cu_info(adev); @@ -3769,9 +3755,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) sizeof(indirect_start_offsets)/sizeof(int)); /* save and restore list */ - temp = RREG32(mmRLC_SRM_CNTL); - temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; - WREG32(mmRLC_SRM_CNTL, temp); + WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1); WREG32(mmRLC_SRM_ARAM_ADDR, 0); for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) @@ -3808,11 +3792,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) { - uint32_t data; - - data = RREG32(mmRLC_SRM_CNTL); - data |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(mmRLC_SRM_CNTL, data); + WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1); } static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) @@ -3822,75 +3802,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG)) { - data = RREG32(mmCP_RB_WPTR_POLL_CNTL); - data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; - data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); - WREG32(mmCP_RB_WPTR_POLL_CNTL, data); - - data = 0; - data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY, data); + WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60); - data = RREG32(mmRLC_PG_DELAY_2); - data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; - data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY_2, data); + data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10); + WREG32(mmRLC_PG_DELAY, data); - data = RREG32(mmRLC_AUTO_PG_CTRL); - data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; - data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); - WREG32(mmRLC_AUTO_PG_CTRL, data); + WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3); + WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0); } } static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - else - data &= 
~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0); } static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0); } static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; - else - data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0); } static void gfx_v8_0_init_pg(struct amdgpu_device *adev) @@ -3929,34 +3868,24 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v8_0_enable_gui_idle_interrupt(adev, false); - gfx_v8_0_wait_for_rlc_serdes(adev); } static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmGRBM_SOFT_RESET); - - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - WREG32(mmGRBM_SOFT_RESET, tmp); + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - WREG32(mmGRBM_SOFT_RESET, tmp); + + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1); /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) @@ -3998,14 +3927,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32(mmRLC_CGCG_CGLS_CTRL, 0); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) + adev->asic_type == CHIP_POLARIS10) WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); gfx_v8_0_rlc_reset(adev); - gfx_v8_0_init_pg(adev); if (!adev->pp_enabled) { @@ -4300,12 +4228,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_start(adev); ring->ready = true; r = amdgpu_ring_test_ring(ring); - if (r) { + if (r) ring->ready = false; - return r; - } - return 0; + return r; } static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) @@ -4980,7 +4906,6 @@ static int gfx_v8_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); - gfx_v8_0_gpu_init(adev); r = gfx_v8_0_rlc_resume(adev); @@ -4988,8 +4913,6 @@ static int gfx_v8_0_hw_init(void *handle) return r; r = gfx_v8_0_cp_resume(adev); - if (r) - return r; return r; } @@ -5037,25 +4960,22 @@ static bool gfx_v8_0_is_idle(void *handle) static int gfx_v8_0_wait_for_idle(void *handle) { unsigned i; - u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmGRBM_STATUS) & 
GRBM_STATUS__GUI_ACTIVE_MASK; - - if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + if (gfx_v8_0_is_idle(handle)) return 0; + udelay(1); } return -ETIMEDOUT; } -static int gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_check_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5064,16 +4984,12 @@ static int gfx_v8_0_soft_reset(void *handle) GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK | + GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { - grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, - GRBM_SOFT_RESET, SOFT_RESET_CP, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); } @@ -5084,73 +5000,199 @@ static int gfx_v8_0_soft_reset(void *handle) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPF, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPC, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPG, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_GRBM, 1); + } + /* SRBM_STATUS */ tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - /* stop the rlc */ - gfx_v8_0_rlc_stop(adev); + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; + adev->gfx.grbm_soft_reset = grbm_soft_reset; + adev->gfx.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; + adev->gfx.grbm_soft_reset = 0; + adev->gfx.srbm_soft_reset = 0; + } + + return 0; +} +static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + int i; + + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { + u32 tmp; + tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST, + DEQUEUE_REQ, 2); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + udelay(1); + } + } +} + +static int gfx_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + 
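Note: the GFX block now splits soft reset into four amd_ip_funcs stages. check_soft_reset latches a per-block hang flag plus the GRBM/SRBM reset masks, pre_soft_reset quiesces the block (stop the RLC, halt CP, drain the HQDs), soft_reset pulses the reset bits, and post_soft_reset brings the rings back up. The function-pointer names below mirror the entries this patch adds to the ip_funcs tables, but the dispatch loop itself is a sketch of how a core driver could chain the stages, not code from this series. Each stage already bails out when the hang flag is clear, so driving the chain unconditionally is safe.

	/* Sketch only: core-side four-stage reset driver. */
	struct ip_reset_funcs {
		int (*check_soft_reset)(void *handle); /* latch hang + reset masks */
		int (*pre_soft_reset)(void *handle);   /* quiesce: stop RLC/CP, drain HQDs */
		int (*soft_reset)(void *handle);       /* pulse GRBM/SRBM reset bits */
		int (*post_soft_reset)(void *handle);  /* re-init rings, restart RLC */
	};

	static int run_soft_reset(void *handle, const struct ip_reset_funcs *f)
	{
		int r;

		r = f->check_soft_reset(handle);
		if (!r)
			r = f->pre_soft_reset(handle);
		if (!r)
			r = f->soft_reset(handle);
		if (!r)
			r = f->post_soft_reset(handle);
		return r;
	}
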
grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + /* stop the rlc */ + gfx_v8_0_rlc_stop(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) /* Disable GFX parsing/prefetching */ gfx_v8_0_cp_gfx_enable(adev, false); + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_inactive_hqd(adev, ring); + } /* Disable MEC parsing/prefetching */ gfx_v8_0_cp_compute_enable(adev, false); + } - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 1); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 1); - WREG32(mmGMCON_DEBUG, tmp); + return 0; +} - udelay(50); - } +static int gfx_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; - udelay(50); + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1); + WREG32(mmGMCON_DEBUG, tmp); + udelay(50); + } - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 0); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 0); - WREG32(mmGMCON_DEBUG, tmp); - } + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); - /* Wait a little for things to settle down */ udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); } + + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0); + WREG32(mmGMCON_DEBUG, tmp); + } + + /* Wait a little for things to settle down */ + 
udelay(50); + + return 0; +} + +static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); + WREG32(mmCP_HQD_PQ_RPTR, 0); + WREG32(mmCP_HQD_PQ_WPTR, 0); + vi_srbm_select(adev, 0, 0, 0, 0); +} + +static int gfx_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) + gfx_v8_0_cp_gfx_resume(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_init_hqd(adev, ring); + } + gfx_v8_0_cp_compute_resume(adev); + } + gfx_v8_0_rlc_start(adev); + return 0; } @@ -5269,8 +5311,6 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - if (adev->asic_type == CHIP_POLARIS11) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, @@ -5278,83 +5318,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable static MGPG */ - if (enable) - data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable dynamic MGPG */ - if (enable) - data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable quick PG */ - if (enable) - data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 
1 : 0); } static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0); /* Read any GFX register to wake up GFX. */ if (!enable) - data = RREG32(mmDB_RENDER_CONTROL); + RREG32(mmDB_RENDER_CONTROL); } static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, @@ -5430,15 +5422,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, data = RREG32(mmRLC_SERDES_WR_CTRL); if (adev->asic_type == CHIP_STONEY) - data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | - RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | - RLC_SERDES_WR_CTRL__P1_SELECT_MASK | - RLC_SERDES_WR_CTRL__P2_SELECT_MASK | - RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | - RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | - RLC_SERDES_WR_CTRL__POWER_UP_MASK | - RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | - RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); else data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | @@ -5461,10 +5453,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define MSG_ENTER_RLC_SAFE_MODE 1 #define MSG_EXIT_RLC_SAFE_MODE 0 - -#define RLC_GPR_REG2__REQ_MASK 0x00000001 -#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 -#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e +#define RLC_GPR_REG2__REQ_MASK 0x00000001 +#define RLC_GPR_REG2__REQ__SHIFT 0 +#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 +#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) { @@ -5494,7 +5486,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5522,7 +5514,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5554,7 +5546,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5581,7 +5573,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5622,21 +5614,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags 
& AMD_CG_SUPPORT_GFX_RLC_LS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) /* 1 - RLC memory Light sleep */ - temp = data = RREG32(mmRLC_MEM_SLP_CNTL); - data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmRLC_MEM_SLP_CNTL, data); - } + WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1); - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - /* 2 - CP memory Light sleep */ - temp = data = RREG32(mmCP_MEM_SLP_CNTL); - data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmCP_MEM_SLP_CNTL, data); - } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) + WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1); } /* 3 - RLC_CGTT_MGCG_OVERRIDE */ @@ -5854,25 +5837,18 @@ static int gfx_v8_0_set_clockgating_state(void *handle, static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; if (ring->use_doorbell) /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; + return ring->adev->wb.wb[ring->wptr_offs]; else - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -5971,9 +5947,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN - (2 << 0) | + (2 << 0) | #endif - (ib->gpu_addr & 0xFFFFFFFC)); + (ib->gpu_addr & 0xFFFFFFFC)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); amdgpu_ring_write(ring, control); } @@ -6118,33 +6094,14 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = - REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state) { - u32 mec_int_cntl, mec_int_cntl_reg; - /* * amdgpu controls only pipe 0 of MEC1. That's why this function only * handles the setting of interrupts for this specific pipe. 
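Note: most of the churn in these hunks is mechanical conversion to the register field helpers. REG_GET_FIELD/REG_SET_FIELD build the mask and shift from the REG__FIELD_MASK and REG__FIELD__SHIFT defines, and WREG32_FIELD presumably expands to a read-modify-write of the form WREG32(mm##reg, REG_SET_FIELD(RREG32(mm##reg), reg, field, val)). A self-contained sketch of the semantics, using a made-up register with one 4-bit field (the FAKE__ names are illustrative, not driver defines):

	#include <stdint.h>
	#include <stdio.h>

	#define FAKE__FIELD_MASK	0x000000f0u
	#define FAKE__FIELD__SHIFT	4

	#define GET_FIELD(val) \
		(((val) & FAKE__FIELD_MASK) >> FAKE__FIELD__SHIFT)
	#define SET_FIELD(val, x) \
		(((val) & ~FAKE__FIELD_MASK) | \
		 (((uint32_t)(x) << FAKE__FIELD__SHIFT) & FAKE__FIELD_MASK))

	int main(void)
	{
		uint32_t reg = 0xdeadbeefu;

		reg = SET_FIELD(reg, 0x6);	/* read-modify-write one field */
		printf("reg=0x%08x field=%u\n",
		       (unsigned)reg, (unsigned)GET_FIELD(reg));
		return 0;	/* prints reg=0xdeadbe6f field=6 */
	}

The one-liner form also drops the "write only if changed" check the open-coded versions carried; for these registers an unconditional write is harmless.
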
All other @@ -6154,7 +6111,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -6165,22 +6121,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, return; } - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, @@ -6188,24 +6130,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } @@ -6215,24 +6141,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 
0 : 1); return 0; } @@ -6338,7 +6248,10 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .resume = gfx_v8_0_resume, .is_idle = gfx_v8_0_is_idle, .wait_for_idle = gfx_v8_0_wait_for_idle, + .check_soft_reset = gfx_v8_0_check_soft_reset, + .pre_soft_reset = gfx_v8_0_pre_soft_reset, .soft_reset = gfx_v8_0_soft_reset, + .post_soft_reset = gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, }; @@ -6479,15 +6392,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - return (~data) & mask; + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index bc82c79..ebed1f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,6 +26,4 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0b0f086..aa0c4b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -183,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -203,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -239,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -393,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. 
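Note: both GMC blocks below delegate the default GTT size to amdgpu_ttm_get_gtt_mem_size() instead of open-coding it. That helper is only referenced in these hunks; the inline default it replaces was simply max(1 GiB, VRAM size), roughly the stand-in below (the helper name and the possibility that the real function also considers system memory are assumptions):

	#include <stdint.h>

	/* Illustrative stand-in for the removed inline default. */
	static uint64_t default_gtt_size(uint64_t mc_vram_size)
	{
		const uint64_t one_gib = 1024ULL << 20;

		return mc_vram_size > one_gib ? mc_vram_size : one_gib;
	}

Only the amdgpu_gart_size=-1 module default takes this path; an explicit amdgpu_gart_size is still honored as a size in MiB via the << 20 shift.
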
*/ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -953,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v7_0_mc_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 2aee2c6..84c10d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -261,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -287,11 +287,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -323,9 +318,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -477,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. */ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -957,6 +949,11 @@ static int gmc_v8_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v8_0_mc_init(adev); if (r) return r; @@ -1100,9 +1097,8 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_check_soft_reset(void *handle) { - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1117,13 +1113,42 @@ static int gmc_v8_0_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } - if (srbm_soft_reset) { - gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle((void *)adev)) { - dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; + adev->mc.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; + adev->mc.srbm_soft_reset = 0; + } + return 0; +} +static int gmc_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_stop(adev, &adev->mc.save); + if (gmc_v8_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + return 0; +} + +static int gmc_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + srbm_soft_reset = adev->mc.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; tmp = 
RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1139,14 +1164,22 @@ static int gmc_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); } return 0; } +static int gmc_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_resume(adev, &adev->mc.save); + return 0; +} + static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -1414,7 +1447,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .resume = gmc_v8_0_resume, .is_idle = gmc_v8_0_is_idle, .wait_for_idle = gmc_v8_0_wait_for_idle, + .check_soft_reset = gmc_v8_0_check_soft_reset, + .pre_soft_reset = gmc_v8_0_pre_soft_reset, .soft_reset = gmc_v8_0_soft_reset, + .post_soft_reset = gmc_v8_0_post_soft_reset, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a845e88..f8618a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (amdgpu_bapm == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 1351c7e..e822296 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -749,24 +749,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -774,39 +766,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
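Note: the removed while loop in sdma_v2_4_vm_copy_pte() capped each SDMA_SUBOP_COPY_LINEAR packet at 0x1FFFF8 bytes and advanced pe/src itself. With the loop gone, the function emits exactly one packet, so the caller (presumably the common VM update code) must now split large requests to the packet limit. A hypothetical caller-side splitter, with emit() standing in for the single-packet vm_copy_pte:

	#include <stdint.h>

	#define SDMA_MAX_COPY_BYTES	0x1FFFF8u	/* cap from the removed loop */

	static void copy_pte_split(void *ib, uint64_t pe, uint64_t src,
				   unsigned count,
				   void (*emit)(void *ib, uint64_t pe,
						uint64_t src, unsigned count))
	{
		while (count) {
			unsigned chunk = count;

			if (chunk > SDMA_MAX_COPY_BYTES / 8)
				chunk = SDMA_MAX_COPY_BYTES / 8; /* 8 bytes per PTE */
			emit(ib, pe, src, chunk);
			pe += (uint64_t)chunk * 8;
			src += (uint64_t)chunk * 8;
			count -= chunk;
		}
	}
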
*/ -static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -822,40 +802,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). */ -static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 653ce5e..bee4978 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -976,24 +976,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - 
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -1001,39 +993,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } +static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) +{ + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -1049,40 +1029,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). 
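Note: for physically contiguous (VRAM) mappings the GEN_PTEPDE packet lets the engine generate the PTEs itself: ten dwords carry the destination, a write mask, the first 64-bit value, the per-entry increment, and the entry count. A self-contained sketch of the layout as emitted in the v2.4 hunk above and the v3.0 hunk that follows (the ib_sketch type is illustrative and 'header' hides the SDMA_PKT_HEADER_OP encoding):

	#include <stdint.h>

	struct ib_sketch {
		uint32_t ptr[64];
		unsigned length_dw;
	};

	static void gen_pte_pde(struct ib_sketch *ib, uint32_t header,
				uint64_t pe, uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
	{
		ib->ptr[ib->length_dw++] = header;		/* SDMA_OP_GEN_PTEPDE */
		ib->ptr[ib->length_dw++] = (uint32_t)pe;	/* dst addr */
		ib->ptr[ib->length_dw++] = (uint32_t)(pe >> 32);
		ib->ptr[ib->length_dw++] = flags;		/* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = (uint32_t)addr;	/* first value */
		ib->ptr[ib->length_dw++] = (uint32_t)(addr >> 32);
		ib->ptr[ib->length_dw++] = incr;		/* per-entry step */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = count;		/* number of entries */
	}

Entry n thus gets addr + n * incr, so mapping sixteen 4 KiB pages is a single packet with incr = 0x1000 and count = 16 against a zero-initialized ib_sketch.
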
*/ -static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** @@ -1320,28 +1281,79 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) || + (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) { srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; } if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; + adev->sdma.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; + adev->sdma.srbm_soft_reset = 0; + } + + return 0; +} + +static int sdma_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); + } + + return 0; +} + +static int sdma_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) 
+ return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_gfx_resume(adev); + sdma_v3_0_rlc_resume(adev); + } + + return 0; +} + +static int sdma_v3_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1559,6 +1571,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .resume = sdma_v3_0_resume, .is_idle = sdma_v3_0_is_idle, .wait_for_idle = sdma_v3_0_wait_for_idle, + .check_soft_reset = sdma_v3_0_check_soft_reset, + .pre_soft_reset = sdma_v3_0_pre_soft_reset, + .post_soft_reset = sdma_v3_0_post_soft_reset, .soft_reset = sdma_v3_0_soft_reset, .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index c920558..d127d59 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; + adev->irq.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; + adev->irq.srbm_soft_reset = 0; + } + + return 0; +} + +static int tonga_ih_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_fini(adev); +} + +static int tonga_ih_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_init(adev); +} + +static int tonga_ih_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + srbm_soft_reset = adev->irq.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = { .resume = tonga_ih_resume, .is_idle = tonga_ih_is_idle, .wait_for_idle = tonga_ih_wait_for_idle, + .check_soft_reset = tonga_ih_check_soft_reset, + .pre_soft_reset = tonga_ih_pre_soft_reset, .soft_reset = tonga_ih_soft_reset, + .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 132e613..10c0407 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 101de136..8513376 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7f21102..2abe8a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); - /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { - uvd_v6_0_set_sw_clock_gating(adev); - } else { - /* disable clock gating */ - uint32_t data = RREG32(mmUVD_CGC_CTRL); - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, data); - } + /* disable clock gating */ + WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0); /* disable interrupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1); mdelay(1); /* put LMI, VCPU, RBC etc...
into reset */ @@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* take UVD block out of reset */ - WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0); mdelay(5); /* initialize UVD memory controller */ @@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) break; DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmUVD_SOFT_RESET, 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0); mdelay(10); r = -1; } @@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* clear the bit 4 of UVD_STATUS */ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); - tmp = 0; - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, tmp); /* set the write pointer delay */ @@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); - WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); return 0; } @@ -748,20 +738,82 @@ static int uvd_v6_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + if (uvd_v6_0_is_idle(handle)) return 0; } return -ETIMEDOUT; } -static int uvd_v6_0_soft_reset(void *handle) +#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd +static int uvd_v6_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || + REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || + (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; + adev->uvd.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; + adev->uvd.srbm_soft_reset = 0; + } + return 0; +} +static int uvd_v6_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; uvd_v6_0_stop(adev); + return 0; +} + +static int uvd_v6_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; + srbm_soft_reset = adev->uvd.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int uvd_v6_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); return uvd_v6_0_start(adev); @@ -902,21 +954,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - static int curstate = -1; if (adev->asic_type == CHIP_FIJI || - adev->asic_type == CHIP_POLARIS10) - uvd_v6_set_bypass_mode(adev, enable); + adev->asic_type == CHIP_POLARIS10) + uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; - - curstate = state; - if (enable) { + if (state == AMD_CG_STATE_GATE) { /* disable HW gating and enable Sw gating */ uvd_v6_0_set_sw_clock_gating(adev); } else { @@ -946,6 +992,8 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; + WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; @@ -966,7 +1014,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .resume = uvd_v6_0_resume, .is_idle = uvd_v6_0_is_idle, .wait_for_idle = uvd_v6_0_wait_for_idle, + .check_soft_reset = uvd_v6_0_check_soft_reset, + .pre_soft_reset = uvd_v6_0_pre_soft_reset, .soft_reset = uvd_v6_0_soft_reset, + .post_soft_reset = uvd_v6_0_post_soft_reset, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 80a37a6..5fa55b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -40,6 +40,7 @@ #define VCE_V2_0_FW_SIZE (256 * 1024) #define VCE_V2_0_STACK_SIZE (64 * 1024) #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); @@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmVCE_RB_WPTR2, ring->wptr); } +static int vce_v2_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_LMI_STATUS); + + if (status & 0x337f) + return 0; + mdelay(10); + } + } + + return -ETIMEDOUT; +} + +static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & 
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v2_0_start - start VCE block * @@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) static int vce_v2_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j, r; + int r; vce_v2_0_mc_resume(adev); @@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev) WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + r = vce_v2_0_firmware_loaded(adev); /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~1); @@ -338,47 +358,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) { - u32 orig, tmp; + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gating"); + return; + } - if (gated) { - if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); - return; - } - WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); - WREG32(mmVCE_STATUS, 0); - } else { - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); + WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("LMI is busy, Can't set clock gating"); + return; } - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp &= ~0x00060006; + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32(mmVCE_STATUS, 0); + + if (gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ if (gated) { - tmp |= 0xe10000; + /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ + WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); } else { - tmp |= 0xe1; - tmp &= ~0xe10000; + /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ + WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); } - WREG32(mmVCE_CLOCK_GATING_B, tmp); - orig
= tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp &= ~0x1fe000; - tmp &= ~0xff000000; - if (tmp != orig) - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */ + WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); - orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp &= ~0x3fc; - if (tmp != orig) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); - if (gated) - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); + if (!gated) { + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + mdelay(100); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + vce_v2_0_firmware_loaded(adev); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + } } static void vce_v2_0_disable_cg(struct amdgpu_device *adev) @@ -458,9 +481,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev) WREG32(mmVCE_VCPU_CACHE_SIZE2, size); WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); vce_v2_0_init_cg(adev); } @@ -474,11 +495,11 @@ static bool vce_v2_0_is_idle(void *handle) static int vce_v2_0_wait_for_idle(void *handle) { - unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + if (vce_v2_0_is_idle(handle)) return 0; } return -ETIMEDOUT; } @@ -488,8 +509,7 @@ static int vce_v2_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); return vce_v2_0_start(adev); @@ -516,10 +536,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, DRM_DEBUG("IH: VCE\n"); switch (entry->src_data) { case 0: - amdgpu_fence_process(&adev->vce.ring[0]); - break; case 1: - amdgpu_fence_process(&adev->vce.ring[1]); + amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index c271abf..615b8b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -37,6 +37,8 @@ #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" +#include "gca/gfx_8_0_sh_mask.h" + #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -107,102 +110,72 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) { - u32 tmp, data; - - tmp = data = RREG32(mmVCE_RB_ARB_CTRL); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(mmVCE_RB_ARB_CTRL, data); + WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ?
1 : 0); } static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, bool gated) { - u32 tmp, data; + u32 data; + /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); - if (!gated) { - /* Force CLOCK ON for VCE_CLOCK_GATING_B, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - * VREG can be FORCE ON or set to Dynamic, but can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + /* This function enables MGCG which is controlled by firmware. + With the clocks in the gated state the core is still + accessible but the firmware will throttle the clocks on the + fly as necessary. + */ + if (gated) { + data = RREG32(mmVCE_CLOCK_GATING_B); data |= 0x1ff; data &= ~0xef0000; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); + WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK ON for VCE_UENC_CLOCK_GATING, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0x3ff000; data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); + WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x2; - data &= ~0x2; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + data &= ~0x00010000; + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data |= 0x37f; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8; + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { - /* Force CLOCK OFF for VCE_CLOCK_GATING_B, - * {*, *_FORCE_OFF} = {*, 1} - * set VREG to Dynamic, as it can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data = RREG32(mmVCE_CLOCK_GATING_B); data &= ~0x80010; data |= 0xe70008; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING, - * Force ClOCK OFF takes precedent over Force CLOCK ON setting. 
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + WREG32(mmVCE_CLOCK_GATING_B, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* Set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + WREG32(mmVCE_UENC_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x10000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8); + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } vce_v3_0_override_vce_clock_gating(adev, false); } @@ -221,12 +194,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) } DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(10); } @@ -264,38 +234,22 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); vce_v3_0_mc_resume(adev, idx); - - WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, - ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(100); r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); - - /* Set Clock-Gating off */ - if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) - vce_v3_0_set_vce_sw_clock_gating(adev, false); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); @@ -304,7 +258,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -319,33 +273,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - 
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, 0, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); + /* hold on ECPU */ - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -534,7 +480,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); - WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + WREG32(mmVCE_CLOCK_GATING_B, 0x1FF); WREG32(mmVCE_LMI_CTRL, 0x00398000); WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); @@ -573,9 +519,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) } WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); } static bool vce_v3_0_is_idle(void *handle) @@ -601,20 +545,108 @@ static int vce_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ +#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ + VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) + +static int vce_v3_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + /* According to the VCE team, we should use VCE_STATUS instead of + * the SRBM_STATUS.VCE_BUSY bit for busy status checking. + * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE + * instance's registers are accessed + * (0 for 1st instance, 10 for 2nd instance). 
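+ * Note: those instance select values are hexadecimal; the code below + * writes 0 and 0x10 to GRBM_GFX_INDEX.INSTANCE_INDEX to pick the 1st + * and 2nd instance respectively.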
+ * + * VCE_STATUS + * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | + * |----+----+-----------+----+----+----+----------+---------+----| + * |bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| + * + * The VCE team suggests using bits 3 to 6 for the busy status check */ + mutex_lock(&adev->grbm_idx_mutex); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; + adev->vce.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; + adev->vce.srbm_soft_reset = 0; + } + mutex_unlock(&adev->grbm_idx_mutex); + return 0; +} + static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + srbm_soft_reset = adev->vce.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int vce_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + + mdelay(5); - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; + return vce_v3_0_suspend(adev); +} + + +static int vce_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, mask, - ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | - SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); mdelay(5); - return vce_v3_0_start(adev); + return vce_v3_0_resume(adev); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -637,9 +669,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, { DRM_DEBUG("IH: VCE\n"); - WREG32_P(mmVCE_SYS_INT_STATUS, - VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, - ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1); switch (entry->src_data) { case 0: @@ -686,13 +716,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - if (i == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -711,7 +735,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -751,7 +775,10 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { .resume = vce_v3_0_resume, .is_idle = vce_v3_0_is_idle, .wait_for_idle = vce_v3_0_wait_for_idle, + .check_soft_reset = vce_v3_0_check_soft_reset, + .pre_soft_reset = vce_v3_0_pre_soft_reset, .soft_reset = vce_v3_0_soft_reset, + .post_soft_reset = vce_v3_0_post_soft_reset, .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 03a31c5..f2e8aa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -77,6 +77,7 @@ #if defined(CONFIG_DRM_AMD_ACP) #include "amdgpu_acp.h" #endif +#include "dce_virtual.h" MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); @@ -822,6 +823,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 4, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &iceland_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &sdma_v2_4_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version tonga_ip_blocks[] = { /* ORDER MATTERS! */ @@ -890,6 +945,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &uvd_v5_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version fiji_ip_blocks[] = { /* ORDER MATTERS! */ @@ -958,6 +1081,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = { /* ORDER MATTERS! 
*/ @@ -1026,6 +1217,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 3, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 4, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1103,34 +1362,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = #endif }; +static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &cz_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif +}; + int vi_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_TOPAZ: - adev->ip_blocks = topaz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); - break; - case CHIP_FIJI: - adev->ip_blocks = fiji_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); - break; - case CHIP_TONGA: - adev->ip_blocks = tonga_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - adev->ip_blocks = polaris11_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->ip_blocks = cz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_TOPAZ: + 
adev->ip_blocks = topaz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); + break; + + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; @@ -1248,8 +1615,17 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; + if (adev->rev_id != 0x00) { + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; + } adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: @@ -1267,7 +1643,13 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index a74a0d2..db71041 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -159,8 +159,14 @@ struct amd_ip_funcs { bool (*is_idle)(void *handle); /* poll for idle */ int (*wait_for_idle)(void *handle); + /* check soft reset the IP block */ + int (*check_soft_reset)(void *handle); + /* pre soft reset the IP block */ + int (*pre_soft_reset)(void *handle); /* soft reset the IP block */ int (*soft_reset)(void *handle); + /* post soft reset the IP block */ + int (*post_soft_reset)(void *handle); /* enable/disable cg for the IP block */ int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h index f3e53b1..19802e9 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h @@ -34,6 +34,7 @@ #define mmUVD_UDEC_ADDR_CONFIG 0x3bd3 
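The check_soft_reset/pre_soft_reset/post_soft_reset hooks added to struct amd_ip_funcs above stage a soft reset: latch the hang status, quiesce the block, pulse the reset bits, then bring the block back up. A minimal sketch of a core loop driving the stages; the helper name is hypothetical and amdgpu's actual reset path is more involved:

/* Hypothetical helper, not amdgpu's real reset code: walk one IP block
 * through the staged soft-reset callbacks, skipping any stage the
 * block does not implement. */
static int ip_block_soft_reset_sketch(void *handle,
				      const struct amd_ip_funcs *funcs)
{
	int r = 0;

	if (funcs->check_soft_reset)
		funcs->check_soft_reset(handle);	/* latch hang status */

	if (funcs->pre_soft_reset) {
		r = funcs->pre_soft_reset(handle);	/* e.g. suspend the block */
		if (r)
			return r;
	}

	if (funcs->soft_reset) {
		r = funcs->soft_reset(handle);		/* pulse the reset bits */
		if (r)
			return r;
	}

	if (funcs->post_soft_reset)
		r = funcs->post_soft_reset(handle);	/* e.g. resume the block */

	return r;
}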
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_SEMA_CNTL 0x3d00 #define mmUVD_LMI_EXT40_ADDR 0x3d26 #define mmUVD_CTX_INDEX 0x3d28 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h index eb4cf53..cc972d2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h @@ -34,6 +34,7 @@ #define mmUVD_UDEC_ADDR_CONFIG 0x3bd3 #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index ec69869..378f4b6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -35,6 +35,7 @@ #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 #define mmUVD_POWER_STATUS_U 0x3bfd +#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index abbb658b..2de34a5 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,6 +31,7 @@ #include "eventmanager.h" #include "pp_debug.h" + #define PP_CHECK(handle) \ do { \ if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ @@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle) pp_handle = (struct pp_instance *)handle; eventmgr = pp_handle->eventmgr; - if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL) + if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL) eventmgr->pp_eventmgr_fini(eventmgr); smumgr = pp_handle->smu_mgr; - if (smumgr != NULL || smumgr->smumgr_funcs != NULL || + if (smumgr != NULL && smumgr->smumgr_funcs != NULL && smumgr->smumgr_funcs->smu_fini != NULL) smumgr->smumgr_funcs->smu_fini(smumgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f7ce4cb..abbcbc9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -4,13 +4,15 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o \ + cz_clockpowergating.o tonga_powertune.o\ tonga_processpptables.o ppatomctrl.o \ tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ fiji_clockpowergating.o fiji_thermal.o \ polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o + polaris10_clockpowergating.o iceland_hwmgr.o \ + iceland_clockpowergating.o iceland_thermal.o \ + iceland_powertune.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 8cc0df9..5ecef17 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) int result; cz_hwmgr->gfx_ramp_step = 256*25/100; - 
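The pp_hw_fini change above is the classic short-circuit fix: with "||" the guard passes whenever either side is non-NULL, so a NULL eventmgr or smumgr could still be dereferenced on the right-hand side; "&&" stops at the first failing check. A reduced illustration using a hypothetical struct, not powerplay code:

#include <stddef.h>

struct mgr {
	void (*fini)(struct mgr *m);
};

static void mgr_fini(struct mgr *m)
{
	/* "m != NULL || m->fini != NULL" evaluates m->fini even when m is
	 * NULL; "&&" makes both conditions guard the indirect call. */
	if (m != NULL && m->fini != NULL)
		m->fini(m);
}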
cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) @@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; - cz_hwmgr->clock_slow_down_freq = 25000; - cz_hwmgr->skip_clock_slow_down = 1; - cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ - cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ - cz_hwmgr->voting_rights_clients = 0x00C00033; - cz_hwmgr->static_screen_threshold = 8; - cz_hwmgr->ddi_power_gating_disabled = 0; - cz_hwmgr->bapm_enabled = 1; - cz_hwmgr->voltage_drop_threshold = 0; - cz_hwmgr->gfx_power_gating_threshold = 500; - cz_hwmgr->vce_slow_sclk_threshold = 20000; - cz_hwmgr->dce_slow_sclk_threshold = 30000; - cz_hwmgr->disable_driver_thermal_policy = 1; - cz_hwmgr->disable_nb_ps3_in_battery = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NonABMSupportInPPLib); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicM3Arbiter); @@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicPatchPowerState); cz_hwmgr->thermal_auto_throttling_treshold = 0; - cz_hwmgr->tdr_clock = 0; - cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; cz_hwmgr->boot_power_level.dsDividerIndex = 0; - cz_hwmgr->boot_power_level.ssDividerIndex = 0; - cz_hwmgr->boot_power_level.allowGnbSlow = 1; - cz_hwmgr->boot_power_level.forceNBPstate = 0; - cz_hwmgr->boot_power_level.hysteresis_up = 0; - cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; - cz_hwmgr->boot_power_level.display_wm = 0; - cz_hwmgr->boot_power_level.vce_wm = 0; return 0; @@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; clock = hwmgr->display_config.min_core_set_clock; -; if (clock == 0) printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n"); @@ -832,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetWatermarkFrequency, - cz_hwmgr->sclk_dpm.soft_max_clk); + cz_hwmgr->sclk_dpm.soft_max_clk); return 0; } @@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, PP_DBG_LOG("enabling ALL SMU features.\n"); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, - dpm_features); + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); if (ret == 0) cz_hwmgr->is_nb_dpm_enabled = true; } @@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr != NULL || hwmgr->backend != NULL) { + if (hwmgr != NULL && hwmgr->backend != NULL) { kfree(hwmgr->backend); kfree(hwmgr); } @@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) PPSMC_MSG_SetUvdHardMin)); cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } else { 
cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } + } else { cz_enable_disable_uvd_dpm(hwmgr, false); + } return 0; } @@ -1690,13 +1664,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != - hw_data->cc6_settings.cpu_pstate_separation_time - || cc6_disable != - hw_data->cc6_settings.cpu_cc6_disable - || pstate_disable != - hw_data->cc6_settings.cpu_pstate_disable - || pstate_switch_disable != - hw_data->cc6_settings.nb_pstate_switch_disable) { + hw_data->cc6_settings.cpu_pstate_separation_time || + cc6_disable != hw_data->cc6_settings.cpu_cc6_disable || + pstate_disable != hw_data->cc6_settings.cpu_pstate_disable || + pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) { hw_data->cc6_settings.cc6_setting_changed = true; @@ -1799,8 +1770,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_PhwCzPowerState(state); level_index = index > ps->level - 1 ? ps->level - 1 : index; - - level->coreClock = ps->levels[level_index].engineClock; + level->coreClock = ps->levels[level_index].engineClock; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 6483d68..9368e21 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -618,9 +618,6 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - data->gpio_debug = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index 4465845..f5992ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -57,8 +57,6 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) /* Assume disabled */ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); @@ -77,9 +75,8 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) fiji_hwmgr->fast_watermark_threshold = 100; - if (hwmgr->powercontainment_enabled) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { tmp = 1; fiji_hwmgr->enable_dte_feature = tmp ? false : true; fiji_hwmgr->enable_tdc_limit_feature = tmp ? 
true : false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 27e0762..d829076 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -39,6 +39,26 @@ extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); + +static int hwmgr_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + if (amdgpu_sclk_deep_sleep_en) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + if (amdgpu_powercontainment) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + + return 0; +} int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { @@ -57,9 +77,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->chip_family = pp_init->chip_family; hwmgr->chip_id = pp_init->chip_id; hwmgr->hw_revision = pp_init->rev_id; + hwmgr->sub_sys_id = pp_init->sub_sys_id; + hwmgr->sub_vendor_id = pp_init->sub_vendor_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; - hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; + + hwmgr_set_features_platform_caps(hwmgr); switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: @@ -67,6 +90,9 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { + case CHIP_TOPAZ: + iceland_hwmgr_init(hwmgr); + break; case CHIP_TONGA: tonga_hwmgr_init(hwmgr); break; @@ -182,29 +208,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, return 0; } -int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return -EINVAL; - } - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == hwmgr->usec_timeout) - return -1; - return 0; -} /** @@ -227,21 +231,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); } -void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return; - } - cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_for_register_unequal(hwmgr, indirect_port + 1, - value, mask); -} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c new file mode 100644 index 0000000..47949f5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c @@ -0,0 +1,119 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#include "hwmgr.h" +#include "iceland_clockpowergating.h" +#include "ppsmc.h" +#include "iceland_hwmgr.h" + +int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum + PHM_AsicBlock block, enum PHM_ClockGateSetting gating) +{ + int ret = 0; + + switch (block) { + case PHM_AsicBlock_UVD_MVC: + case PHM_AsicBlock_UVD: + case PHM_AsicBlock_UVD_HD: + case PHM_AsicBlock_UVD_SD: + if (gating == PHM_ClockGateSetting_StaticOff) + ret = iceland_phm_powerdown_uvd(hwmgr); + else + ret = iceland_phm_powerup_uvd(hwmgr); + break; + case PHM_AsicBlock_GFX: + default: + break; + } + + return ret; +} + +int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + + iceland_phm_powerup_uvd(hwmgr); + iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) { + iceland_update_uvd_dpm(hwmgr, true); + iceland_phm_powerdown_uvd(hwmgr); + } else { + iceland_phm_powerup_uvd(hwmgr); + iceland_update_uvd_dpm(hwmgr, false); + } + + return 0; +} + +int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) + return iceland_phm_powerdown_vce(hwmgr); + else + return iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id) +{ + /* iceland does not have MM hardware block */ + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h new file mode 100644 index 0000000..ff5ef00 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef _ICELAND_CLOCK_POWER_GATING_H_ +#define _ICELAND_CLOCK_POWER_GATING_H_ + +#include "iceland_hwmgr.h" +#include "pp_asicblocks.h" + +extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); +extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); +#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h new file mode 100644 index 0000000..a7b4bc6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h @@ -0,0 +1,41 @@ +#ifndef ICELAND_DYN_DEFAULTS_H +#define ICELAND_DYN_DEFAULTS_H + +enum ICELANDdpm_TrendDetection +{ + ICELANDdpm_TrendDetection_AUTO, + ICELANDdpm_TrendDetection_UP, + ICELANDdpm_TrendDetection_DOWN +}; +typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection; + + +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 + + +#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200 + +#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0 + +#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8 + +#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 + +#define PPICELAND_REFERENCEDIVIDER_DFLT 4 + +#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035 +#define PPICELAND_CGULVCONTROL_DFLT 0x00007450 +#define PPICELAND_TARGETACTIVITY_DFLT 30 +#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10 + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c new file mode 100644 index 0000000..8a7ada5 --- /dev/null +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -0,0 +1,5692 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" +#include "pp_acpi.h" +#include "hwmgr.h" +#include <atombios.h> +#include "iceland_hwmgr.h" +#include "pptable.h" +#include "processpptables.h" +#include "pp_debug.h" +#include "ppsmc.h" +#include "cgs_common.h" +#include "pppcielanes.h" +#include "iceland_dyn_defaults.h" +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "iceland_clockpowergating.h" +#include "iceland_thermal.h" +#include "iceland_powertune.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "cgs_linux.h" +#include "eventmgr.h" +#include "amd_pcie_helpers.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + +#define SMC_RAM_END 0x40000 +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic); + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ + DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ + DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ +}; + +static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + + return 0; +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_get_memory_type(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + /* iceland does not have MM hardware blocks */ + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. 
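+ * + * The version is read back through the MC_SEQ_IO_DEBUG index/data pair: + * write index 0x9F, then read mmMC_SEQ_IO_DEBUG_DATA.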
+ * @return always 0 + */ +int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + + +static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = iceland_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = iceland_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = iceland_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = iceland_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = iceland_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control; +} + +/* + * -------------- Voltage Tables ---------------------- + * If the voltage table would be bigger than what will fit into the + * state table on the SMC, keep only the higher entries. + */ + +static void iceland_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_voltage_steps, + pp_atomctrl_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) { + return; + } + + diff = voltage_table->count - max_voltage_steps; + + for (i = 0; i < max_voltage_steps; i++) { + voltage_table->entries[i] = voltage_table->entries[i + diff]; + } + + voltage_table->count = max_voltage_steps; + + return; +} + +/** + * Enable voltage control + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *voltage_dependency_table, + pp_atomctrl_voltage_table *voltage_table) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != voltage_dependency_table), + "Voltage Dependency Table empty.", return -EINVAL;); + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + voltage_table->count = voltage_dependency_table->count; + + for (i = 0; i < voltage_dependency_table->count; i++) { + voltage_table->entries[i].value = + voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +/** + * Create Voltage Tables. + * + * @param hwmgr the address of the powerplay hardware manager. 
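+ * + * Each rail is filled according to its control method: GPIO-controlled + * rails read a lookup table through atomctrl, SVI2-controlled rails are + * derived from the clock-voltage dependency tables, and any table larger + * than the SMU71 level limit is trimmed down to fit the SMC state table.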
+ * @return always 0 + */ +int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result; + + /* GPIO voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDC table.", return result;); + } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + /* SVI2 VDDC voltage */ + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)), + "Too many voltage values for VDDC. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); + ); + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", return result;); + } + + /* SVI2 VDDCI voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + &data->vddci_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)), + "Too many voltage values for VDDCI. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); + ); + + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve table.", return result;); + } + + /* SVI2 voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + &data->mvdd_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)), + "Too many voltage values for MVDD. 
Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); + ); + + return 0; +} + +/*---------------------------MC----------------------------*/ + +uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) +{ + bool result = true; + + switch (inReg) { + case mmMC_SEQ_RAS_TIMING: + *outReg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *outReg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *outReg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *outReg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *outReg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *outReg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *outReg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *outReg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *outReg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *outReg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *outReg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *outReg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *outReg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *outReg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? address : table->mc_reg_address[i].s1; + } + return 0; +} + +int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -1); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. 
when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. need to set these data for each clock range + * + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + switch (table->mc_reg_address[i].s1) { + /* + * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write + * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS, need + * to be update mmMC_PMG_CMD_MRS/_LP[15:0] + */ + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_GDDR5) { + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + + if (!data->is_memory_GDDR5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + + +static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table) +{ + uint8_t i, j; + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1<<i); + break; + } + } + } + + return 0; +} + +static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_mc_reg_table *table; + phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table; + uint8_t module_index = 
iceland_get_memory_module_index(hwmgr);
+
+	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+	if (NULL == table)
+		return -ENOMEM;
+
+	/* Program additional LP registers that are no longer programmed by VBIOS */
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+	if (0 == result)
+		result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+	if (0 == result) {
+		iceland_set_s0_mc_reg_index(ni_table);
+		result = iceland_set_mc_special_registers(hwmgr, ni_table);
+	}
+
+	if (0 == result)
+		iceland_set_valid_flag(ni_table);
+
+	kfree(table);
+	return result;
+}
+
+/**
+ * Programs static screen detection parameters
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0 + */ +int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** + * Setup display gap for glitch free memory clock switching. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + return 0; +} + +/** + * Programs activity state transition voting clients + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +static int iceland_upload_firmware(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_upload_firmware_image(hwmgr->smumgr); + + return ret; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0
+ */
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, DpmTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->dpm_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, SoftRegisters),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcRegisterTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->mc_reg_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, FanTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->fan_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->arb_table_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, Version),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		hwmgr->microcode_version_info.SMC = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = iceland_read_smc_sram_dword(hwmgr->smumgr,
+				SMU71_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU71_Firmware_Header, UlvSettings),
+				&tmp, data->sram_end);
+
+	if (0 == result) {
+		data->ulv_settings_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	return error ? 1 : 0;
+}
+
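+/*
+ * Illustrative sketch (not part of this patch): the repeated
+ * read-offset-and-accumulate-error pattern above could be factored into a
+ * helper built on the same iceland_read_smc_sram_dword() signature used
+ * throughout this file:
+ *
+ *	static int iceland_read_fw_header_u32(struct pp_hwmgr *hwmgr,
+ *			uint32_t field_offset, uint32_t *out)
+ *	{
+ *		iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+ *
+ *		return iceland_read_smc_sram_dword(hwmgr->smumgr,
+ *				SMU71_FIRMWARE_HEADER_LOCATION + field_offset,
+ *				out, data->sram_end);
+ *	}
+ *
+ * Each table location is a dword field of SMU71_Firmware_Header at a fixed
+ * SRAM address; offsetof() selects the field within the header.
+ */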
+/*
+ * Copy one arb setting to another and then switch the active set.
+ * arbFreqSrc and arbFreqDest are one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+		uint32_t arbFreqSrc, uint32_t arbFreqDest)
+{
+	uint32_t mc_arb_dram_timing;
+	uint32_t mc_arb_dram_timing2;
+	uint32_t burst_time;
+	uint32_t mc_cg_config;
+
+	switch (arbFreqSrc) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+		break;
+
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+		break;
+
+	default:
+		return -1;
+	}
+
+	switch (arbFreqDest) {
+	case MC_CG_ARB_FREQ_F0:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+		break;
+
+	case MC_CG_ARB_FREQ_F1:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+		break;
+
+	default:
+		return -1;
+	}
+
+	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+	mc_cg_config |= 0x0000000F;
+	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
+
+	return 0;
+}
+
+/**
+ * Initial switch from ARB F0->F1
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ * This function is to be called from the SetPowerState table.
+ */
+int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
+{
+	return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
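+/*
+ * Usage sketch (illustrative only): the wrapper above performs the one-time
+ * F0 -> F1 switch at DPM enable; the reverse direction would be
+ *
+ *	iceland_copy_and_switch_arb_sets(hwmgr,
+ *			MC_CG_ARB_FREQ_F1, MC_CG_ARB_FREQ_F0);
+ *
+ * Either way the DRAM timing registers are copied into the destination set
+ * first, so the arbiter never runs on unprogrammed timings when MC_ARB_CG
+ * is pointed at the new set.
+ */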
+/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
+
+static int iceland_reset_single_dpm_table(
+	struct pp_hwmgr *hwmgr,
+	struct iceland_single_dpm_table *dpm_table,
+	uint32_t count)
+{
+	uint32_t i;
+
+	if (!(count <= MAX_REGULAR_DPM_NUMBER))
+		printk(KERN_ERR "[ powerplay ] Fatal error, cannot set up single DPM table entries to exceed max number!\n");
+
+	dpm_table->count = count;
+	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
+		dpm_table->dpm_levels[i].enabled = 0;
+	}
+
+	return 0;
+}
+
+static void iceland_setup_pcie_table_entry(
+	struct iceland_single_dpm_table *dpm_table,
+	uint32_t index, uint32_t pcie_gen,
+	uint32_t pcie_lanes)
+{
+	dpm_table->dpm_levels[index].value = pcie_gen;
+	dpm_table->dpm_levels[index].param1 = pcie_lanes;
+	dpm_table->dpm_levels[index].enabled = 1;
+}
+
+/*
+ * Set up the PCIe DPM table as follows:
+ *
+ * A = Performance State, Max, Gen Speed
+ * C = Performance State, Min, Gen Speed
+ * 1 = Performance State, Max, Lane #
+ * 3 = Performance State, Min, Lane #
+ *
+ * B = Power Saving State, Max, Gen Speed
+ * D = Power Saving State, Min, Gen Speed
+ * 2 = Power Saving State, Max, Lane #
+ * 4 = Power Saving State, Min, Lane #
+ *
+ * DPM Index	Gen Speed	Lane #
+ *	5	A		1
+ *	4	B		2
+ *	3	C		1
+ *	2	D		2
+ *	1	C		3
+ *	0	D		4
+ */
+static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+			data->use_pcie_power_saving_levels),
+		"No pcie performance levels!", return -EINVAL);
+
+	if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
+		data->pcie_gen_power_saving = data->pcie_gen_performance;
+		data->pcie_lane_power_saving = data->pcie_lane_performance;
+	} else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
+		data->pcie_gen_performance = data->pcie_gen_power_saving;
+		data->pcie_lane_performance = data->pcie_lane_power_saving;
+	}
+
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK);
+
+	/* Hardcode Pcie Table */
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+		get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
+		get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
+	data->dpm_table.pcie_speed_table.count = 6;
+
+	return 0;
+}
+
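+/*
+ * Illustrative note: with the hardcoded table above, and assuming
+ * get_pcie_gen_support()/get_pcie_lane_support() clamp the request to the
+ * platform caps, a board limited to gen2 x8 would get the Min gen in
+ * entries 0-1 and the Max gen in entries 2-5, all at x8: every entry
+ * currently requests PP_Max_PCIELane, so only the gen-speed column of the
+ * layout comment varies here.
+ */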
+/*
+ * This function initializes all DPM state tables for SMU7 based on the dependency table.
+ * Dynamic state patching will then trim these state tables to the allowed range based
+ * on the power policy or external client requests, such as UVD requests, etc.
+ */
+static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	uint32_t i;
+
+	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_sclk;
+	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+		hwmgr->dyn_state.vddc_dependency_on_mclk;
+	struct phm_cac_leakage_table *std_voltage_table =
+		hwmgr->dyn_state.cac_leakage_table;
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -1);
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+		"SCLK dependency table must have at least one entry. This table is mandatory", return -1);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -1);
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+		"MCLK dependency table must have at least one entry. This table is mandatory", return -1);
+
+	/* clear the state table to reset everything to default */
+	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI);
+	iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD);
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+		"SCLK dependency table is missing. This table is mandatory", return -1);
+	/* Initialize Sclk DPM table based on allowed Sclk values */
+	data->dpm_table.sclk_table.count = 0;
+
+	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+				allowed_vdd_sclk_table->entries[i].clk) {
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+				allowed_vdd_sclk_table->entries[i].clk;
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+		"MCLK dependency table is missing. This table is mandatory", return -1);
+	/* Initialize Mclk DPM table based on allowed Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+				allowed_vdd_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+				allowed_vdd_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values.
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; + } + + allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* + * Initialize MVDD DPM table based on allow Mclk + * values + */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + /* setup PCIE gen speed levels*/ + iceland_setup_default_pcie_tables(hwmgr); + + /* save a copy of the default DPM table*/ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table)); + + return 0; +} + +/** + * @brief PhwIceland_GetVoltageOrder + * Returns index of requested voltage record in lookup(table) + * @param hwmgr - pointer to hardware manager + * @param lookutab - lookup list to search in + * @param voltage - voltage to look for + * @return 0 on success + */ +uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, + uint16_t voltage) +{ + uint8_t count = (uint8_t) (look_up_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage equal or bigger than requested */ + if (look_up_table->entries[i].us_vdd >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i-1; +} + + +static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + /* SCLK/VDDC Dependency Table has to exist. */ + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + /* + * Since voltage in the sclk/vddc dependency table is not + * necessarily in ascending order because of ELB voltage + * patching, loop through entire list to find exact voltage. 
+	 */
+	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+			vol_found = true;
+			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+			} else {
+				pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+			}
+			break;
+		}
+	}
+
+	/*
+	 * If voltage is not found in the first pass, loop again to
+	 * find the best match, equal or higher value.
+	 */
+	if (!vol_found) {
+		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+				vol_found = true;
+				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+				} else {
+					pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+				}
+				break;
+			}
+		}
+
+		if (!vol_found)
+			pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+	}
+
+	return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab,
+		SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+	int result;
+
+	result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+			&smc_voltage_tab->StdVoltageHiSidd,
+			&smc_voltage_tab->StdVoltageLoSidd);
+	if (0 != result) {
+		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+	return 0;
+}
+
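+/*
+ * Worked example (illustrative): with VOLTAGE_SCALE == 4, its usual
+ * powerplay value (units of 0.25 mV), a 1100 mV entry that hits neither
+ * CAC lookup pass is reported to the SMC as
+ *
+ *	StdVoltageHiSidd = StdVoltageLoSidd = 1100 * 4 = 4400
+ *
+ * while a CAC leakage hit replaces Lo with the table Vddc and Hi with the
+ * leakage-adjusted voltage used for static-power (SIDD) estimation.
+ */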
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	int result;
+
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->VddcLevelCount = data->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->vddc_voltage_table.entries[count],
+				&table->VddcLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDC voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+			table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+		else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+			table->VddcLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+	return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_DpmTable *table)
+{
+	int result;
+	uint32_t count;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->VddciLevelCount = data->vddci_voltage_table.count;
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->vddci_voltage_table.entries[count],
+				&table->VddciLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDCI voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control)
+			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+	return 0;
+}
+
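+/*
+ * Endianness note (illustrative): the SMC consumes big-endian data, so
+ * every multi-byte field is byte-swapped once its table section is fully
+ * built, e.g.
+ *
+ *	table->VddciLevelCount = data->vddci_voltage_table.count;
+ *	...populate VddciLevel[]...
+ *	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+ *
+ * which is why the counts above are converted exactly once, after the
+ * level arrays have been populated, and must not be read back afterwards.
+ */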
+ * @return 0
+ */
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+		SMU71_Discrete_DpmTable *table)
+{
+	int result;
+	uint32_t count;
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+
+	table->MvddLevelCount = data->mvdd_voltage_table.count;
+	for (count = 0; count < table->MvddLevelCount; count++) {
+		result = iceland_populate_smc_voltage_table(hwmgr,
+				&data->mvdd_voltage_table.entries[count],
+				&table->MvddLevel[count]);
+		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC MVDD voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+			table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+	return 0;
+}
+
+/**
+ * Convert a voltage value in mV units to the VID number required by SMU firmware
+ */
+static uint8_t convert_to_vid(uint16_t vddc)
+{
+	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
+
+int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint8_t *hi_vid = data->power_tune_table.BapmVddCVidHiSidd;
+	uint8_t *lo_vid = data->power_tune_table.BapmVddCVidLoSidd;
+
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+		"The CAC Leakage table does not exist!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+		"There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+		"CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+		for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+		}
+	} else {
+		PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+	}
+
+	return 0;
+}
+
+int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
+	uint8_t *vid = data->power_tune_table.VddCVid;
+
+	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+		"There should never be more than 8 entries for VddcVid!!!",
+		return -EINVAL);
+
+	for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+	}
+
+	return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + + result = iceland_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -1); + + result = iceland_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -1); + + result = iceland_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -1); + + return 0; +} + + +/** + * Re-generate the DPM level mask value + * @param hwmgr the address of the hardware manager + */ +static uint32_t iceland_get_dpm_level_enable_mask_value( + struct iceland_single_dpm_table * dpm_table) +{ + uint32_t i; + uint32_t mask_value = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + return mask_value; +} + +int iceland_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result = 0; + SMU71_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = iceland_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU71_Discrete_MCArbDramTimingTable), + data->sram_end + ); + } + + return result; +} + +static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage) +{ + uint8_t count = (uint8_t) (voltage_table->count); + uint8_t i = 0; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), + "Voltage Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage bigger than requested */ + if (voltage_table->entries[i].value >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + + +static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *tab) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + tab->SVI2Enable |= VDDC_ON_SVI2; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) + tab->SVI2Enable |= VDDCI_ON_SVI2; + else + tab->MergedVddci = 1; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) + tab->SVI2Enable |= MVDD_ON_SVI2; + + PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && + (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); + + return 0; +} + +static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *vol = allowed_clock_voltage_table->entries[i - 1].v; + + return 0; +} + +static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if 
(memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, 
mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_GDDR5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+
+		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+			/* ss_info.speed_spectrum_rate -- in unit of khz */
+			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+			/*      = reference_clock * 5 / speed_spectrum_rate */
+			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+			/*      = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+			uint32_t clkv =
+				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+					ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+		}
+	}
+
+	/* MCLK_PWRMGT_CNTL setup */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+	/* Save the result data to the output memory level structure */
+	mclk->MclkFrequency = memory_clock;
+	mclk->MpllFuncCntl = mpll_func_cntl;
+	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+	mclk->DllCntl = dll_cntl;
+	mclk->MpllSs1 = mpll_ss1;
+	mclk->MpllSs2 = mpll_ss2;
+
+	return 0;
+}
+
+static int iceland_populate_single_memory_level(
+	struct pp_hwmgr *hwmgr,
+	uint32_t memory_clock,
+	SMU71_Discrete_MemoryLevel *memory_level
+	)
+{
+	iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
+	int result = 0;
+	bool dllStateOn;
+	struct cgs_display_info info = {0};
+
+	if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+	}
+
+	if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) {
+		memory_level->MinVddci = memory_level->MinVddc;
+	} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddci_dependency_on_mclk,
+			memory_clock,
+			&memory_level->MinVddci);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+	}
+
+	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+		result = iceland_get_dependecy_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result);
+	}
+
+	memory_level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control) {
+		iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				memory_clock, &memory_level->MinVddcPhases);
+	}
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->EnabledForActivity = 1;
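+	/*
+	 * Hysteresis sketch (illustrative): ActivityLevel is the busy-percentage
+	 * target the SMC compares against, and UpHyst/DownHyst below are the
+	 * number of sampling intervals activity must stay above/below it before
+	 * a level switch; DownHyst = 100 makes downclocking deliberately slow
+	 * next to the immediate (0) upclock response.
+	 */
+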
memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + //if ((data->mclk_stutter_mode_threshold != 0) && + // (memory_clock <= data->mclk_stutter_mode_threshold) && + // (data->is_uvd_enabled == 0) + // && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + // && (data->display_timing.num_existing_displays <= 2) + // && (data->display_timing.num_existing_displays != 0)) + // memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && + (memory_clock <= data->mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_GDDR5) { + memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((data->mclk_edc_enable_threshold != 0) && + (memory_clock > data->mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((data->mclk_edc_wr_enable_threshold != 0) && + (memory_clock > data->mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dllStateOn = data->dll_defaule_on; + } + } else { + memory_level->StrobeRatio = + iceland_get_ddr3_mclk_frequency_ratio(memory_clock); + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = iceland_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates the SMC MVDD structure using the provided memory clock. + * + * @param hwmgr the address of the hardware manager + * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. + * @param voltage the SMC VOLTAGE structure to be populated + */ +int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -1); + + } else { + return -1; + } + + return 0; +} + + +static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + SMU71_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 
0 : 1;
+
+	/* assign zero for now */
+	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+		table->ACPILevel.SclkFrequency, &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	/* divider ID for required SCLK */
+	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+		CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+		CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	/* For various features to be enabled/disabled while this level is active. */
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	/* SCLK frequency in units of 10KHz */
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+	/* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); */
+
+	if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd =
+			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	/* Force reset on DLL */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+	/* Disable DLL in ACPIState */
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+	/* Enable DLL bypass signal */
+	dll_cntl = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK0_BYPASS, 0);
+	dll_cntl = PHM_SET_FIELD(dll_cntl,
+		DLL_CNTL, MRDCK1_BYPASS, 0);
+
+	table->MemoryACPILevel.DllCntl =
+		PP_HOST_TO_SMC_UL(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl =
+		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
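+	/*
+	 * Illustrative note: for the ACPI (lowest-power) memory state the DLLs
+	 * are held in reset and powered down via the MRDCK*_RESET/_PDNB fields
+	 * set above, while the MPLL mirror registers below simply replay the
+	 * boot-time values captured in data->clock_registers, converted to SMC
+	 * byte order on the way.
+	 */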
PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level) +{ + int result = -EINVAL; + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + result = 0; + } + } + return result; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { +
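/* + * Worked example for the spread spectrum programming below (a sketch + * with assumed numbers, not values from a real VBIOS): with a 25 MHz + * reference (reference_clock = 2500 in 10 kHz units), REFDIV = 4 + * (so reference_divider = 5) and an SS modulation rate of 30 kHz, + * clkS = 2500 * 5 / (5 * 30) = 83, i.e. the integer encoding of + * Fref * 10 / (REFDIV + 1) / rate / 2. clkV then scales the SS + * percentage (in 0.01% units) onto the 26-bit fractional feedback + * divider: clkV = 4 * ss_percentage * fbdiv / (clkS * 10000). + */ +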
pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, uint32_t min_engine_clock_in_sr) +{ + uint32_t i, temp; + uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ? + min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK; + + PP_ASSERT_WITH_CODE((engine_clock >= min), + "Engine clock can't satisfy stutter requirement!", return 0); + + for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) { + temp = engine_clock / (1 << i); + + if(temp >= min || i == 0) + break; + } + return (uint8_t)i; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, uint16_t sclk_activity_level_threshold, + SMU71_Discrete_GraphicsLevel *graphic_level) +{ + int result; + uint32_t threshold; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + + /* populate graphics levels*/ + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + + /* + * Minimum VDDC phases required to support this level, it + * should get from dependence table. + */ + graphic_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + } + + /* Indicates maximum activity level for this performance level. 
50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 1; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 100; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + threshold = engine_clock * data->fast_watermark_threshold / 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + graphic_level->DeepSleepDivId = + iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock, + data->display_timing.min_clock_insr); + } + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t level_array_adress = data->dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS; + SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; + uint32_t i; + uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = iceland_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)data->activity_target[i], + &(data->smc_state_table.GraphicsLevel[i])); + if (0 != result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
*/ + if (i > 1) + data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (highest_pcie_level_enabled + 1))) != 0) { + highest_pcie_level_enabled++; + } + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) { + count++; + } + + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled; + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) { + data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + } + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + + /* the level count is sent to the SMC once, at SMC table init, and never changes */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + + if (0 != result) + return result; + + return 0; +} + +/** + * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states + * + * @param hwmgr the address of the hardware manager + */ + +static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY; + SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "cannot populate memory level as memory clock is zero", return -1); + result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(data->smc_state_table.MemoryLevel[i])); + if (0 != result) { + return result; + } + } + + /* Only enable level 0 for now.*/ + data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * In order to prevent MC activity in stutter mode from pushing DPM up, + * the UVD change complements this by putting the MCLK in a higher state + * by default, so that we are not affected by the up threshold or MCLK DPM latency.
+ */ + data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); + + data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* the level count is sent to the SMC once, at SMC table init, and never changes */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + + if (0 != result) { + return result; + } + + return 0; +} + +struct ICELAND_DLL_SPEED_SETTING +{ + uint16_t Min; /* Minimum Data Rate*/ + uint16_t Max; /* Maximum Data Rate*/ + uint32_t dll_speed; /* The desired DLL_SPEED setting*/ +}; + +static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate) +{ + int result = 0; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t voltage_response_time, ulv_voltage; + + pstate->CcPwrDynRm = 0; + pstate->CcPwrDynRm1 = 0; + + /* backbiasResponseTime is used for the ULV state voltage value. */ + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "cannot get ULV voltage value", return result;); + + if (!ulv_voltage) { + data->ulv.ulv_supported = false; + return 0; + } + + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { + pstate->VddcOffset = 0; + } else { + /* used in SMIO Mode. not implemented for now. this is backup only for CI. */ + pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { + pstate->VddcOffsetVid = 0; + } else { + /* used in SVI2 Mode */ + pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + } + } + + /* used in SVI2 Mode to shed phase */ + pstate->VddcPhase = (data->vddc_phase_shed_control) ?
0 : 1; + + if (0 == result) { + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset); + } + + return result; +} + +static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv) +{ + return iceland_populate_ulv_level(hwmgr, ulv); +} + +static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 0 on success; otherwise an error code. + */ +int iceland_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *table = &(data->smc_state_table); + const struct phw_iceland_ulv_parm *ulv = &(data->ulv); + + result = iceland_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result;); + memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) { + iceland_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + } + + if (data->is_memory_GDDR5) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + } + + if (ulv->ulv_supported) { + result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); + } + + result = iceland_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = iceland_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = iceland_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = iceland_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result =
iceland_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* + * Since only the initial state is completely set up at this + * point (the other states are just copies of the boot state) + * we only need to populate the ARB settings for the initial + * state. + */ + result = iceland_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = iceland_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = iceland_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot engine clock value in dependency table.\n"); + + result = iceland_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot memory clock value in dependency table.\n"); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) { + table->BootVddci = table->BootVddc; + } + else { + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + } + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + result = iceland_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate SVI2 setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * 
VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController), + data->sram_end); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + data->ulv_settings_start, + (uint8_t *)&(data->ulv_setting), + sizeof(SMU71_Discrete_Ulv), + data->sram_end); + +#if 0 + /* Notify SMC to follow new GPIO scheme */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + } +#endif + + return result; +} + +int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) +{ + const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + uint32_t i, j; + + for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) { + if (data->iceland_mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array out of boundary", return -1); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/* convert register values from driver to SMC format */ +void iceland_convert_mc_registers( + const phw_iceland_mc_reg_entry * pEntry, + SMU71_Discrete_MCRegisterSet *pData, + uint32_t numEntries, uint32_t validflag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < numEntries; j++) { + if (validflag & 1<<j) { + pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); + i++; + } + } +} + +/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */ +int iceland_convert_mc_reg_table_entry_to_smc( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + SMU71_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) { + if (memory_clock <= + data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0)) + --i; + + iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag); + + return 0; +} + +int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU71_Discrete_MCRegisters *mc_reg_table) +{ + int result = 0; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = iceland_convert_mc_reg_table_entry_to_smc( + hwmgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_reg_table->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +int 
iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters)); + result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, + (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end); +} + +int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; +} + +int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); + + return 0; +} + +int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -1); + } + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -1); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x100005);/*Read */ + + udelay(10); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x500005);/* write */ + + } + + return 0; +} + +int iceland_start_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable general power management */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); + /* enable sclk deep sleep */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -1); + + if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) { + PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 
== smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -1 + ); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt); + } + + return 0; +} + +static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr, + uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_start_smc(hwmgr->smumgr); + + return ret; +} + +/** +* Programs the Deep Sleep registers +* +* @param pHwMgr the address of the powerplay hardware manager. 
+* @return 0 on success; -EINVAL if an SMC message is not accepted. +*/ +static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_ON) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -EINVAL); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + + return 0; +} + +static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + if (cf_iceland_voltage_control(hwmgr)) { + tmp_result = iceland_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable voltage control!", return tmp_result); + + tmp_result = iceland_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to construct voltage tables!", return tmp_result); + } + + tmp_result = iceland_initialize_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize MC reg table!", return tmp_result); + + tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", return tmp_result); + + tmp_result = iceland_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", return tmp_result); + + tmp_result = iceland_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", return tmp_result); + + tmp_result = iceland_upload_firmware(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload firmware header!", return tmp_result); + + tmp_result = iceland_process_firmware_header(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", return tmp_result); + + tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", return tmp_result); + + tmp_result = iceland_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", return tmp_result); + + tmp_result = iceland_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate initial MC Reg table!", return tmp_result); + + tmp_result = iceland_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate PM fuses!", return tmp_result); + + /* start SMC */ + tmp_result = iceland_tf_start_smc(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start SMC!", return tmp_result); + + /* enable SCLK control */ + tmp_result = iceland_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", return tmp_result); + + tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable deep sleep!", return tmp_result); + + /* enable DPM */ + tmp_result = iceland_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", return tmp_result); +
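/* + * A note on the error handling pattern in this function: + * PP_ASSERT_WITH_CODE expands to roughly (a sketch, not the literal + * macro definition) + * + * if (!(cond)) { + * pr_warning("%s\n", msg); + * code; + * } + + * so the third argument is a statement run on failure. "return + * tmp_result" aborts the bring-up at the failing step, while the + * "result = tmp_result" form used for the last two steps below + * records the error but still lets the remaining steps execute. + */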
+ tmp_result = iceland_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SMC CAC!", return tmp_result); + + tmp_result = iceland_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable power containment!", return tmp_result); + + tmp_result = iceland_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = iceland_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + return result; +} + +static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + return phm_hwmgr_backend_fini(hwmgr); +} + +static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phw_iceland_ulv_parm *ulv; + + ulv = &data->ulv; + ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT; + data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7; + + data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT; + data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicACTiming); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMemoryTransition); + + iceland_initialize_power_tune_defaults(hwmgr); + + data->mclk_strobe_mode_threshold = 40000; + data->mclk_stutter_mode_threshold = 30000; + data->mclk_edc_enable_threshold = 40000; + data->mclk_edc_wr_enable_threshold = 40000; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMCLS); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification); +} + +static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint16_t virtual_voltage_id; + uint16_t vddc = 0; + uint16_t i; + + /* the count indicates actual number of entries */ + data->vddc_leakage.count = 0; + data->vddci_leakage.count = 0; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + pr_err("Iceland should always support EVV\n"); + return -EINVAL; + } + + /* retrieve voltage for leakage ID (0xff01 + i) */ + for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + 
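/* + * Illustration (hypothetical numbers): leakage IDs are the virtual + * voltage values 0xff01..0xff08 (ATOM_VIRTUAL_VOLTAGE_ID0 + 0..7). + * If the VBIOS translates ID 0xff01 to, say, 1150 mV, the pair + * (1150, 0xff01) is recorded in vddc_leakage below so the patch + * helpers can later rewrite 0xff01 -> 1150 in the dependency tables; + * IDs that translate to 0 or to themselves are skipped. + */ +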
PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)), + "Error retrieving EVV voltage value!\n", continue); + + if (vddc >= 2000) + pr_warning("Invalid VDDC value!\n"); + + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + } + + return 0; +} + +static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddc) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. + */ + if (leakage_table->leakage_id[leakage_index] == *vddc) { + /* + * Need to make sure vddc is less than 2v or + * else, it could burn the ASIC. + */ + if (leakage_table->actual_voltage[leakage_index] >= 2000) + pr_warning("Invalid VDDC value!\n"); + *vddc = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddci) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. 
+ */ + if (leakage_table->leakage_id[leakage_index] == *vddci) { + *vddci = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static int iceland_patch_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddci(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + + +static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, + struct phm_phase_shedding_limits_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage); + + return 0; +} + +static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *tab) +{ + if (tab) { + iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc); + iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci); + } + + return 0; +} + +static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) +{ + uint32_t i; + uint32_t vddc; + + if (tab) { + for (i = 0; i < tab->count; i++) { + vddc = (uint32_t)(tab->entries[i].Vddc); + iceland_patch_with_vddc_leakage(hwmgr, &vddc); + tab->entries[i].Vddc = (uint16_t)vddc; + } + } + + return 0; +} + +static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) +{ + int tmp; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_samu_vddc(hwmgr, 
hwmgr->dyn_state.samu_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); + if (tmp) + return -EINVAL; + + tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); + if (tmp) + return -EINVAL; + + tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); + if (tmp) + return -EINVAL; + + tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); + if (tmp) + return -EINVAL; + + return 0; +} + +static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, + "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, + "VDDC dependency on SCLK table has to have at least 1 entry. This table is mandatory\n", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, + "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, + "VDDC dependency on MCLK table has to have at least 1 entry. This table is mandatory\n", return -EINVAL); + + data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v; + data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { + data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v; + data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + } + + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; + + return 0; +} + +static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) +{ + uint32_t table_size; + struct phm_clock_voltage_dependency_table *table_clk_vlt; + + hwmgr->dyn_state.mclk_sclk_ratio = 4; + hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ + hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ + + /* initialize vddc_dep_on_dal_pwrl table */ + table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); + table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == table_clk_vlt) { + pr_err("[ powerplay
] Cannot allocate space for vddc_dep_on_dal_pwrl!\n"); + return -ENOMEM; + } else { + table_clk_vlt->count = 4; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; + table_clk_vlt->entries[1].v = 720; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; + table_clk_vlt->entries[2].v = 810; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; + table_clk_vlt->entries[3].v = 900; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + } + + return 0; +} + +/** + * Initializes the Volcanic Islands Hardware Manager + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 0 on success; otherwise an appropriate error code. + */ +static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + SMU71_Discrete_DpmTable *table = NULL; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + bool stay_in_boot; + struct phw_iceland_ulv_parm *ulv; + struct cgs_system_info sys_info = {0}; + + PP_ASSERT_WITH_CODE((NULL != hwmgr), + "Invalid Parameter!", return -EINVAL;); + + data->dll_defaule_on = 0; + data->sram_end = SMC_RAM_END; + + data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT; + + data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT; + + data->sclk_dpm_key_disabled = 0; + data->mclk_dpm_key_disabled = 0; + data->pcie_dpm_key_disabled = 0; + data->pcc_monitor_enabled = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + data->gpio_debug = 0; + data->engine_clock_data = 0; + data->memory_clock_data = 0; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleepAboveLow); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + /* Initializes DPM default values. */ + iceland_initialize_dpm_defaults(hwmgr); + + /* Enable Platform EVV support. */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + + /* Get leakage voltage based on leakage ID. */ + result = iceland_get_evv_voltage(hwmgr); + if (result) + goto failed; + + /** + * Patch our voltage dependency table with actual leakage + * voltage. We need to perform leakage translation before it's + * used by other functions such as + * iceland_set_hwmgr_variables_based_on_pptable. + */ + result = iceland_patch_dependency_tables_with_leakage(hwmgr); + if (result) + goto failed; + + /* Parse pptable data read from VBIOS.
*/ + result = iceland_set_private_var_based_on_pptale(hwmgr); + if (result) + goto failed; + + /* ULV support */ + ulv = &(data->ulv); + ulv->ulv_supported = 1; + + /* Initialize Dynamic State Adjustment Rule Settings */ + result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + if (result) { + pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n"); + goto failed; + } + + data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE; + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE; + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE; + + /* + * Hardcode thermal temperature settings for now, these will + * be overwritten if a custom policy exists. + */ + data->thermal_temp_setting.temperature_low = 99500; + data->thermal_temp_setting.temperature_high = 100000; + data->thermal_temp_setting.temperature_shutdown = 104000; + data->uvd_enabled = false; + + table = &data->smc_state_table; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment)) { + table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment)) { + table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* + * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, the Peak + * Current Control feature is enabled and we should program + * the PCC HW register + */ + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, + &gpio_pin_assignment)) { + uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL); + + switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { + case 0: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); + break; + case 1: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); + break; + case 2: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); + break; + case 3: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); + break; + case 4: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); + break; + default: + pr_warning("[ powerplay ] Failed to setup PCC HW register!
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n"); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL, temp_reg); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMU7); + + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_SVID2)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_SVID2)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + data->vddc_phase_shed_control = false; + + stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StayInBootState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ActivityReporting); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPCIEGen2Support); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisablePowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalAutoThrottling); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableLSClockGating); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SamuDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AcpDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6inACSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnablePlatformPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PauseMMSessions); + + 
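/* + * The platformCaps calls above and below maintain a simple capability + * bitmap (a sketch of the semantics, assuming the PHM bit layout used + * by phm_cap_enabled() elsewhere in this file): + * + * enabled = caps[cap / 32] & (1u << (cap % 32)); + * + * phm_cap_set() sets that bit and phm_cap_unset() clears it; this + * block just builds the driver's default mask before the per-board + * overrides from VBIOS/system info are applied further down. + */ +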
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingManagedInCAIL); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_IcelandULPSSWWorkAround); + + /* Iceland does not support UVD and VCE power gating by default */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + data->is_tlu_enabled = false; + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + ICELAND_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + } else { + /* Ignore return value in here, we are cleaning up a mess. */ + iceland_hwmgr_backend_fini(hwmgr); + } + + return 0; +failed: + return result; +} + +static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + result = pp_tables_get_num_of_entries(hwmgr, &ret); + + return result ? 
0 : ret; +} + +static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct iceland_power_state *cast_phw_iceland_power_state( + struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct iceland_power_state *)hw_ps; +} + +static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) +{ + struct iceland_power_state *iceland_ps = + cast_phw_iceland_power_state(&prequest_ps->hardware); + + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < iceland_ps->performance_level_count; i++) { + if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk) + iceland_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk) + iceland_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; + iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) { + if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != 
hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = iceland_ps->performance_levels[0].engine_clock; + mclk = iceland_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; + + iceland_ps->performance_levels[0].engine_clock = sclk; + iceland_ps->performance_levels[0].memory_clock = mclk; + + iceland_ps->performance_levels[1].engine_clock = + (iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ? + iceland_ps->performance_levels[1].engine_clock : + iceland_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < iceland_ps->performance_levels[1].memory_clock) + mclk = iceland_ps->performance_levels[1].memory_clock; + + iceland_ps->performance_levels[0].memory_clock = mclk; + iceland_ps->performance_levels[1].memory_clock = mclk; + } else { + if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock) + iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + for (i = 0; i < iceland_ps->performance_level_count; i++) { + iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + + return 0; +} + +static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + /* + * We return the status of Voltage Control instead of checking SCLK/MCLK DPM + * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM, + * whereas voltage control is a fundamental change that will not be disabled + */ + return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); +} + +/** + * Force the SCLK DPM state + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
*/ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", return -1;); + if (0 == data->sclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_DPM_ForceState, + n) ? 0 : 1); + + return 0; +} + +/** + * Force the MCLK DPM state + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to force MCLK when DPM is disabled", return -1;); + if (0 == data->mclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_ForceState, + n) ? 0 : 1); + + return 0; +} + +/** + * Force the PCIe DPM level + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to force PCIE level when DPM is disabled", return -1;); + if (0 == data->pcie_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + n) ? 0 : 1); + + return 0; +} + +static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + uint32_t level, tmp; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* SCLK */ + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force highest sclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, + SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level); + } + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* MCLK */ + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)), + "force highest mclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level); + } + } + } + + if (0 == data->pcie_dpm_key_disabled) { + /* PCIE */ + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)), + "force highest pcie dpm state failed!", return -1); + } + } + } + + return 0; +} + +static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr, + uint32_t level_mask) +{ + uint32_t level = 0; + + while (0 == (level_mask & (1 << level))) + level++; + + return level; +} + +static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + uint32_t level; + iceland_hwmgr *data = 
(iceland_hwmgr *)(hwmgr->backend); + + /* for now force only sclk */ + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = iceland_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force sclk dpm state failed!", return -1); + + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, + CURR_SCLK_INDEX, + level); + } + + return 0; +} + +int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", + return -1); + + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_NoForcedLevel)), + "unforce sclk dpm state failed!", + return -1); + } + + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_NoForcedLevel)), + "unforce mclk dpm state failed!", + return -1); + } + + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel)), + "unforce pcie level failed!", + return -1); + } + + return 0; +} + +static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = iceland_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = iceland_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = iceland_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + return ret; +} + +const struct iceland_power_state *cast_const_phw_iceland_power_state( + const struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (const struct iceland_power_state *)hw_ps; +} + +static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; + struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < psclk_table->count; i++) { + if (sclk == psclk_table->dpm_levels[i].value) + break; + } + + if (i >= psclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* + * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep + * divider update is required. 
+ */ + if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < pmclk_table->count; i++) { + if (mclk == pmclk_table->dpm_levels[i].value) + break; + } + + if (i >= pmclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps) +{ + uint32_t i; + uint32_t pcie_speed, max_speed = 0; + + for (i = 0; i < hw_ps->performance_level_count; i++) { + pcie_speed = hw_ps->performance_levels[i].pcie_gen; + if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + + return max_speed; +} + +static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speed_cntl = 0; + + speed_cntl = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speed_cntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + + +static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state); + const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state); + + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch(target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); 
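+ /* + * Illustrative note: need_update_smu7_dpm_table is a bitmask of + * distinct power-of-two flags (values assumed here for the example): + * DPMTABLE_OD_UPDATE_SCLK 0x01, DPMTABLE_OD_UPDATE_MCLK 0x02, + * DPMTABLE_UPDATE_SCLK 0x04, DPMTABLE_UPDATE_MCLK 0x08 + * so the "+" in the mask tests above behaves exactly like "|" since no + * two flags share a bit; the assert-then-freeze sequence used for SCLK + * is completed for MCLK just below. + */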
+ PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + return 0; +} + +static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; + uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; + struct iceland_dpm_table *pdpm_table = &data->dpm_table; + + struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* + * Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { + clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value + + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { + clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value - + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { + clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value + + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value - + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct iceland_single_dpm_table *pdpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < pdpm_table->count; i++) { + if ((pdpm_table->dpm_levels[i].value < low_limit) || + (pdpm_table->dpm_levels[i].value > high_limit)) + pdpm_table->dpm_levels[i].enabled = false; + else + pdpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state) +{ + int result = 0; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table), + hw_state->performance_levels[0].engine_clock, + hw_state->performance_levels[high_limit_count].engine_clock); + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table), + hw_state->performance_levels[0].memory_clock, + hw_state->performance_levels[high_limit_count].memory_clock); + + return result; +} + +static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + + result = iceland_trim_dpm_states(hwmgr, iceland_ps); + if (0 != result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)) + data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + + data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + return 0; +} + +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + data->sram_end + ); + } + + return result; +} + +static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters)); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); + + if(result != 0) + return result; + + + address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&data->mc_reg_table.data[0], + sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + data->sram_end); +} + +static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return iceland_program_memory_timing_parameters(hwmgr); 
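+ /* + * Illustrative worked example for the clock_percent scaling in + * iceland_populate_and_upload_sclk_mclk_dpm_levels() above: clocks are + * kept in 10 kHz units, so a golden top SCLK of 80000 (800 MHz) + * overdriven to 88000 (880 MHz) gives + * clock_percent = (88000 - 80000) * 100 / 80000 = 10 + * level[i] = golden[i] + golden[i] * 10 / 100 (+10% per level) + * i.e. every intermediate level is stretched by the same ratio as the + * top level, preserving the shape of the DPM curve; that rescaling is + * why the memory timing parameters may need reprogramming here. + */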
+ + return 0; +} + +static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps); + uint8_t request; + + if (data->pspp_notify_required || + data->pcie_performance_request) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) { + data->pcie_performance_request = false; + return 0; + } + + if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + data->pcie_performance_request = false; + return 0; +} + +int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + PPSMC_Result result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Sclk Dpm enable Mask failed", return -1); + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Mclk Dpm enable Mask failed", return -1); + } + } + + return 0; +} + +static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + + tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); + } + + tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + + tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); + + tmp_result = iceland_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); + + tmp_result = iceland_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); + + tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); + + tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); + + tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result); + } + + return result; +} + +static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct iceland_power_state); +} + +static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].memory_clock; + else + return 
iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; +} + +static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].engine_clock; + else + return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; +} + +static int iceland_get_current_pcie_lane_number( + struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, + LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. */ + data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr); + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)iceland_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *power_state, + unsigned int index, const void *clock_info) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state); + const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; + struct iceland_performance_level *performance_level; + uint32_t engine_clock, memory_clock; + uint16_t pcie_gen_from_bios; + + engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; + memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; + + if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) + 
data->highest_mclk = memory_clock; + + performance_level = &(iceland_power_state->performance_levels + [iceland_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = memory_clock; + performance_level->engine_clock = engine_clock; + + pcie_gen_from_bios = visland_clk_info->ucPCIEGen; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); + + return 0; +} + +static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct iceland_power_state *ps; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *dep_mclk_table = + hwmgr->dyn_state.vddci_dependency_on_mclk; + + memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct iceland_power_state *)(&state->hardware); + + result = pp_tables_get_entry(hwmgr, entry_index, state, + iceland_get_pp_table_entry_callback_func); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state as + * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot + * state if there is only one VDDCI/MCLK level, check if it's + * the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].v != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) { + if (data->bacos.best_match == 0xffff) { + /* For C.I. 
use boot state as base BACO state */ + data->bacos.best_match = PP_StateClassificationFlag_Boot; + data->bacos.performance_level = ps->performance_levels[0]; + } + } + + + ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK; + ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static void +iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); + + offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); + + seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); + + seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); +} + +int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ + iceland_notify_smc_display_change(hwmgr, false); + else + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int iceland_program_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if(0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us)); + + if (num_active_displays == 1) + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + iceland_program_display_gap(hwmgr); + + return 0; +} + +/** +* Set maximum target operating fan output PWM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanPwm: max operating fan PWM in percents +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); +} + +/** +* Set maximum target operating fan output RPM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanRpm: max operating fan RPM value. +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 
0 : -1); +} + +static int iceland_dpm_set_interrupt_state(void *private_data, + unsigned src_id, unsigned type, + int enabled) +{ + uint32_t cg_thermal_int; + struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + switch (type) { + case AMD_THERMAL_IRQ_LOW_TO_HIGH: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + + case AMD_THERMAL_IRQ_HIGH_TO_LOW: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + default: + break; + } + return 0; +} + +static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + int result; + const struct pp_interrupt_registration_info *info = + (const struct pp_interrupt_registration_info *)thermal_interrupt_info; + + if (info == NULL) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + return 0; +} + + +static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0,0,NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; +/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL + if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cgs_get_min_clock_settings(hwmgr->device, &min_clocks); + if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) + is_update_required = true; +*/ + return is_update_required; +} + + +static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1, + const struct iceland_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1); + const struct iceland_power_state *psb = 
cast_const_phw_iceland_power_state(pstate2); + int i; + + if (equal == NULL || psa == NULL || psb == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); + *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + *equal &= (psa->acp_clk == psb->acp_clk); + + return 0; +} + +static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static int iceland_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + case PP_PCIE: + { + uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + 
continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = iceland_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock; + uint32_t tc; + uint32_t divide; + + ATOM_FIRMWARE_INFO *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tc) + return TCLK; + + fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + if 
(!fw_info) + return 0; + + reference_clock = le16_to_cpu(fw_info->usReferenceClock); + + divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != divide) + return reference_clock / 4; + + return reference_clock; +} + +static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + +static const struct pp_hwmgr_func iceland_hwmgr_funcs = { + .backend_init = &iceland_hwmgr_backend_init, + .backend_fini = &iceland_hwmgr_backend_fini, + .asic_setup = &iceland_setup_asic_task, + .dynamic_state_management_enable = &iceland_enable_dpm_tasks, + .apply_state_adjust_rules = iceland_apply_state_adjust_rules, + .force_dpm_level = &iceland_force_dpm_level, + .power_state_set = iceland_set_power_state_tasks, + .get_power_state_size = iceland_get_power_state_size, + .get_mclk = iceland_dpm_get_mclk, + .get_sclk = iceland_dpm_get_sclk, + .patch_boot_state = iceland_dpm_patch_boot_state, + .get_pp_table_entry = iceland_get_pp_table_entry, + .get_num_of_pp_table_entries = iceland_get_num_of_entries, + .print_current_perforce_level = iceland_print_current_perforce_level, + .powerdown_uvd = iceland_phm_powerdown_uvd, + .powergate_uvd = iceland_phm_powergate_uvd, + .powergate_vce = iceland_phm_powergate_vce, + .disable_clock_power_gating = iceland_phm_disable_clock_power_gating, + .update_clock_gatings = iceland_phm_update_clock_gatings, + .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = iceland_display_configuration_changed_task, + .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output, + .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output, + .get_temperature = iceland_thermal_get_temperature, + .stop_thermal_controller = iceland_thermal_stop_thermal_controller, + .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration, + .check_states_equal = iceland_check_states_equal, + .set_fan_control_mode = iceland_set_fan_control_mode, + .get_fan_control_mode = iceland_get_fan_control_mode, + .force_clock_level = iceland_force_clock_level, + .print_clock_levels = iceland_print_clock_levels, + .get_sclk_od = iceland_get_sclk_od, + .set_sclk_od = iceland_set_sclk_od, + .get_mclk_od = 
iceland_get_mclk_od, + .set_mclk_od = iceland_set_mclk_od, +}; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data; + + data = kzalloc(sizeof(iceland_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + hwmgr->hwmgr_func = &iceland_hwmgr_funcs; + hwmgr->pptable_func = &pptable_funcs; + + /* thermal */ + pp_iceland_thermal_initialize(hwmgr); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h new file mode 100644 index 0000000..f253988 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h @@ -0,0 +1,424 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#ifndef ICELAND_HWMGR_H +#define ICELAND_HWMGR_H + +#include "hwmgr.h" +#include "ppatomctrl.h" +#include "ppinterrupt.h" +#include "ppsmc.h" +#include "iceland_powertune.h" +#include "pp_endian.h" +#include "smu71_discrete.h" + +#define ICELAND_MAX_HARDWARE_POWERLEVELS 2 +#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 + +struct iceland_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct _phw_iceland_bacos { + uint32_t best_match; + uint32_t baco_flags; + struct iceland_performance_level performance_level; +}; +typedef struct _phw_iceland_bacos phw_iceland_bacos; + +struct _phw_iceland_uvd_clocks { + uint32_t VCLK; + uint32_t DCLK; +}; + +typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks; + +struct _phw_iceland_vce_clocks { + uint32_t EVCLK; + uint32_t ECCLK; +}; + +typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks; + +struct iceland_power_state { + uint32_t magic; + phw_iceland_uvd_clocks uvd_clocks; + phw_iceland_vce_clocks vce_clocks; + uint32_t sam_clk; + uint32_t acp_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS]; +}; + +struct _phw_iceland_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; +typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level; + +#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define ICELAND_MINIMUM_ENGINE_CLOCK 5000 + +struct iceland_single_dpm_table { + uint32_t count; + phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct iceland_dpm_table { + struct iceland_single_dpm_table sclk_table; + struct iceland_single_dpm_table mclk_table; + struct iceland_single_dpm_table pcie_speed_table; + struct iceland_single_dpm_table vddc_table; + struct iceland_single_dpm_table vdd_gfx_table; + struct iceland_single_dpm_table vdd_ci_table; + struct iceland_single_dpm_table mvdd_table; +}; +typedef struct iceland_dpm_table phw_iceland_dpm_table; + + +struct _phw_iceland_clock_registers { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; +typedef struct _phw_iceland_clock_registers phw_iceland_clock_registers; + +struct _phw_iceland_voltage_smio_registers { + uint32_t vs0_vid_lower_smio_cntl; +}; +typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers; + + +struct _phw_iceland_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry; + +struct _phw_iceland_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid.
bit0->address[0], bit1->address[1], etc.*/ + phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table; + +#define DISABLE_MC_LOADMICROCODE 1 +#define DISABLE_MC_CFGPROGRAMMING 2 + + +/*Ultra Low Voltage parameter structure */ +struct phw_iceland_ulv_parm{ + bool ulv_supported; + uint32_t ch_ulv_parameter; + uint32_t ulv_volt_change_delay; + struct iceland_performance_level ulv_power_level; +}; + +#define ICELAND_MAX_LEAKAGE_COUNT 8 + +struct phw_iceland_leakage_voltage { + uint16_t count; + uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT]; +}; + +struct _phw_iceland_display_timing { + uint32_t min_clock_insr; + uint32_t num_existing_displays; +}; +typedef struct _phw_iceland_display_timing phw_iceland_display_timing; + + +struct phw_iceland_thermal_temperature_setting +{ + long temperature_low; + long temperature_high; + long temperature_shutdown; +}; + +struct _phw_iceland_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; +typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask; + +struct _phw_iceland_pcie_perf_range { + uint16_t max; + uint16_t min; +}; +typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range; + +struct _phw_iceland_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; +typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +/* We need to review which fields are needed. */ +/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
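+ * (The SMU71_Discrete_* members below are the Iceland/SMU71-specific additions; most of the remaining fields appear to mirror that older layout.)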
*/ +struct iceland_hwmgr { + struct iceland_dpm_table dpm_table; + struct iceland_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vdd_gfx_control; + + uint32_t vddc_vddci_delta; + uint32_t vddc_vddgfx_delta; + + struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; + struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; + struct pp_interrupt_registration_info smc_to_host_interrupt_info; + uint32_t active_auto_throttle_sources; + + struct pp_interrupt_registration_info external_throttle_interrupt; + irq_handler_func_t external_throttle_callback; + void *external_throttle_context; + + struct pp_interrupt_registration_info ctf_interrupt_info; + irq_handler_func_t ctf_callback; + void *ctf_context; + + phw_iceland_clock_registers clock_registers; + phw_iceland_voltage_smio_registers voltage_smio_registers; + + bool is_memory_GDDR5; + uint16_t acpi_vddc; + bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ + uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ + uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ + uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ + uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ + uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ + struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ + struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ + struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ + uint16_t min_vddc_in_pp_table; + uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ + uint16_t min_vddci_in_pp_table; + uint32_t mclk_strobe_mode_threshold; + uint32_t mclk_stutter_mode_threshold; + uint32_t mclk_edc_enable_threshold; + uint32_t mclk_edc_wr_enable_threshold; + bool is_uvd_enabled; + bool is_xdma_enabled; + phw_iceland_vbios_boot_state vbios_boot_state; + + bool battery_state; + bool is_tlu_enabled; + bool pcie_performance_request; + + /* -------------- SMC SRAM Address of firmware header tables ----------------*/ + uint32_t sram_end; /* The first address after the SMC SRAM. */ + uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ + uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ + uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ + uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ + uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ + uint32_t ulv_settings_start; + SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ + SMU71_Discrete_MCRegisters mc_reg_table; + SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ + + /* -------------- Stuff originally coming from Evergreen --------------------*/ + phw_iceland_mc_reg_table iceland_mc_reg_table; + uint32_t vdd_ci_control; + pp_atomctrl_voltage_table vddc_voltage_table; + pp_atomctrl_voltage_table vddci_voltage_table; + pp_atomctrl_voltage_table vddgfx_voltage_table; + pp_atomctrl_voltage_table mvdd_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vdd_ci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_defaule_on; + bool performance_request_registered; + + /* ----------------- Low Power Features ---------------------*/ + phw_iceland_bacos bacos; + struct phw_iceland_ulv_parm ulv; + + /* ----------------- CAC Stuff ---------------------*/ + uint32_t cac_table_start; + bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ + bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ + bool cac_enabled; + + /* ----------------- DPM2 Parameters ---------------------*/ + uint32_t power_containment_features; + bool enable_bapm_feature; + bool enable_dte_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + struct iceland_pt_defaults *power_tune_defaults; + SMU71_Discrete_PmFuses power_tune_table; + uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + /* ----------------- Phase Shedding ---------------------*/ + bool vddc_phase_shed_control; + + /* --------------------- DI/DT --------------------------*/ + phw_iceland_display_timing display_timing; + + /* --------- ReadRegistry data for memory and engine clock margins ---- */ + uint32_t engine_clock_data; + uint32_t memory_clock_data; + + /* -------- Thermal Temperature Setting --------------*/ + struct phw_iceland_thermal_temperature_setting thermal_temp_setting; + phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask; + + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + /* used to store the previous dal min sclock */ + uint32_t min_engine_clocks; + phw_iceland_pcie_perf_range pcie_gen_performance; + phw_iceland_pcie_perf_range pcie_lane_performance; + phw_iceland_pcie_perf_range pcie_gen_power_saving; + phw_iceland_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + /* percentage value from 0-100, default 50 */ + uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; + uint32_t mclk_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + uint32_t pcc_monitor_enabled; + + /* --------- Power Gating States ------------*/ + bool uvd_power_gated; /* 1: gated, 0:not gated */ + bool vce_power_gated; /* 1: gated, 0:not gated */ + bool samu_power_gated; /* 1: gated, 0:not gated */ + bool acp_power_gated; /* 1: gated, 0:not gated */ + bool pg_acp_init; + + /* soft pptable for re-uploading into smu */ + void *soft_pp_table; +}; + +typedef struct iceland_hwmgr iceland_hwmgr; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr); +int 
iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr); +int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr); + +#define ICELAND_DPM2_NEAR_TDP_DEC 10 +#define ICELAND_DPM2_ABOVE_SAFE_INC 5 +#define ICELAND_DPM2_BELOW_SAFE_INC 20 + +/* + * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size + * is 128, then this value should be Log2(128) = 7. + */ +#define ICELAND_DPM2_LTA_WINDOW_SIZE 7 + +#define ICELAND_DPM2_LTS_TRUNCATE 0 + +#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100 + +#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF +#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF + +#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define ICELAND_VOLTAGE_CONTROL_NONE 0x0 +#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2 + +/* convert to Q8.8 format for firmware */ +#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256 + +#define ICELAND_UNUSED_GPIO_PIN 0x7F + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c new file mode 100644 index 0000000..041e964 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c @@ -0,0 +1,490 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#include "amdgpu.h" +#include "hwmgr.h" +#include "smumgr.h" +#include "iceland_hwmgr.h" +#include "iceland_powertune.h" +#include "iceland_smumgr.h" +#include "smu71_discrete.h" +#include "smu71.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "pp_endian.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 +#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 +#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 +#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 + + +struct iceland_pt_defaults defaults_iceland = +{ + /* + * sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, + * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +/* 35W - XT, XTL */ +struct iceland_pt_defaults defaults_icelandxt = +{ + /* + * sviLoadLineEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +/* 25W - PRO, LE */ +struct iceland_pt_defaults defaults_icelandpro = +{ + /* + * sviLoadLineEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t tmp = 0; + struct cgs_system_info sys_info = {0}; + uint32_t pdev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + pdev_id = (uint32_t)sys_info.value; + + switch (pdev_id) { + case DEVICE_ID_VI_ICELAND_M_6900: + case DEVICE_ID_VI_ICELAND_M_6903: + data->power_tune_defaults = &defaults_icelandxt; + break; + + case DEVICE_ID_VI_ICELAND_M_6901: + case DEVICE_ID_VI_ICELAND_M_6902: + data->power_tune_defaults = &defaults_icelandpro; + break; + default: + /* TODO: need to assign valid defaults */ + data->power_tune_defaults = &defaults_iceland; + pr_warning("Unknown V.I. 
Device ID.\n"); + break; + } + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + data->ul_dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + data->fast_watermark_threshold = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + tmp = 1; + data->enable_dte_feature = tmp ? false : true; + data->enable_tdc_limit_feature = tmp ? true : false; + data->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } + } +} + +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_pt_defaults *defaults = data->power_tune_defaults; + SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + uint16_t *def1, *def2; + int i, j, k; + + /* + * TDP number of fraction bits are changed from 8 to 7 for Iceland + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset; + + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + /* The following are for new Iceland Multi-input fan/thermal control */ + if(NULL != ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU71_DTE_SOURCES; j++) { + for (k = 0; k < SMU71_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + 
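+ /* tdc_limit is encoded as value * 256 (Q8.8, cf. ICELAND_Q88_FORMAT_CONVERSION_UNIT); per the note below the SMC consumes it with 7 fraction bits. For example, usTDC = 35 encodes as 35 * 256 = 8960. */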
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Iceland as requested by SMC team + */ + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!", + return -EINVAL); + else + data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 8; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint16_t HiSidd; + uint16_t LoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW0 - DW3 */ + if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate bapm vddc vid Failed!", + return -EINVAL); + + /* DW4 - DW5 */ + if (iceland_populate_vddc_vid(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate vddc vid Failed!", + return -EINVAL); + + /* DW6 */ + if (iceland_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (iceland_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8
*/ + if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != iceland_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW16 */ + if (iceland_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW17 */ + if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW18 */ + if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU71_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (0 == smc_result) ? true : false; + } + return result; +} + +static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if(data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table; + int smc_result; + int result = 0; + uint32_t is_asic_kicker; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2); + is_asic_kicker = (is_asic_kicker >> 12) & 0x01; + + if (data->enable_bapm_feature && + (!is_asic_kicker || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable BAPM in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; + } + + if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc)) + dpm_table->DTEMode = 2; + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((0 == 
smc_result), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (0 == smc_result) { + struct phm_cac_tdp_table *cac_table = + hwmgr->dyn_state.cac_dtp_table; + uint32_t default_limit = + (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (iceland_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* + * SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h new file mode 100644 index 0000000..6c25ee1 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h @@ -0,0 +1,74 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#ifndef ICELAND_POWERTUNE_H +#define ICELAND_POWERTUNE_H + +#include "smu71.h" + +enum iceland_pt_config_reg_type { + ICELAND_CONFIGREG_MMR = 0, + ICELAND_CONFIGREG_SMC_IND, + ICELAND_CONFIGREG_DIDT_IND, + ICELAND_CONFIGREG_CACHE, + ICELAND_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 + +struct iceland_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum iceland_pt_config_reg_type type; +}; + +struct iceland_pt_defaults +{ + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddc; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; + uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr); +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr); +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* ICELAND_POWERTUNE_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c new file mode 100644 index 0000000..527f370 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c @@ -0,0 +1,595 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <asm/div64.h> +#include "iceland_thermal.h" +#include "iceland_hwmgr.h" +#include "iceland_smumgr.h" +#include "atombios.h" +#include "ppsmc.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + + +/** +* Get Fan Speed Control Parameters. +* @param hwmgr the address of the powerplay hardware manager. 
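+* (Reports the supported duty-cycle range in percent and, when tachometer pulses per revolution are configured, the min/max RPM as well.)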
+* @param fan_speed_info is the address of the structure where the result is to be placed. +* @exception Always succeeds except if we cannot zero out the output structure. +*/ +int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +/** +* Get Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the address where the fan speed percentage is to be placed. +* @exception Fails if the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); + + if (0 == duty100) + return -EINVAL; + + tmp64 = (uint64_t)duty * 100; + do_div(tmp64, duty100); + *speed = (uint32_t)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +/** +* Get Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the address of the structure where the result is to be placed. +* @exception Returns not supported if no fan is found or if pulses per revolution are not set +*/ +int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + return 0; +} + +/** +* Set Fan Speed Control to static mode, so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM +* @exception Should always succeed. +*/ +int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed.
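+* (Restores the FDO_PWM_MODE and TMIN values that iceland_fan_ctrl_set_static_mode saved away.)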
+*/ +static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->fan_ctrl_is_in_default_mode) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; +} + +int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails if the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -EINVAL; + + if (speed > 100) { + pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n"); + speed = 100; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); + + return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + if (0 == result) + result = iceland_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the RPM value (min - max) to be set. +* @exception Fails if the speed does not lie between min and max. +*/ +int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + return 0; +} + +/** +* Reads the remote temperature from the Iceland thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); + + /* + * Bit 9 means the reading is lower than the lowest usable + * value. + */ + if (0 != (0x200 & temp)) + temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING; + else + temp = (temp & 0x1ff); + + temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager.
+* @param low_temp Lowest temperature to be programmed for the low alert signal +* @param high_temp Highest temperature to be programmed for the high alert signal +* @exception -EINVAL if the requested range is invalid (low above high). +*/ +static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the Iceland thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; +} + +/** +* Disable thermal alerts on the Iceland thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = iceland_thermal_disable_alert(hwmgr); + + if (result) + pr_warning("Failed to disable thermal alerts!\n"); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager.
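+* (The advanceFanControlParameters temperatures are stored in 0.01 degC units, hence the (50 + t) / 100 rounding to whole degrees in the code below.)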
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = iceland_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + //fan_table.FanControl_GL_Flag = 1; + + res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + + if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); + + if (0 != res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); +*/ + return 0; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + iceland_fan_ctrl_start_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + return iceland_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return iceland_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. 
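+* (The dispatcher is expected to pass a struct PP_TemperatureRange via pInput.)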
+
+/**
+* Disable high and low alerts
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from disable alert routine
+*/
+static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
+{
+	return iceland_thermal_disable_alert(hwmgr);
+}
+
+static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = {
+	{ NULL, tf_iceland_thermal_initialize },
+	{ NULL, tf_iceland_thermal_set_temperature_range },
+	{ NULL, tf_iceland_thermal_enable_alert },
+	/*
+	 * We should restrict performance levels to low before we halt
+	 * the SMC. On the other hand we are still in boot state when
+	 * we do this so it would be pointless. If this assumption
+	 * changes we have to revisit this table.
+	 */
+	{ NULL, tf_iceland_thermal_setup_fan_table},
+	{ NULL, tf_iceland_thermal_start_smc_fan_control},
+	{ NULL, NULL }
+};
+
+static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	iceland_thermal_start_thermal_controller_master_list
+};
+
+static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = {
+	{ NULL, tf_iceland_thermal_disable_alert},
+	{ NULL, tf_iceland_thermal_set_temperature_range},
+	{ NULL, tf_iceland_thermal_enable_alert},
+	{ NULL, NULL }
+};
+
+static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	iceland_thermal_set_temperature_range_master_list
+};
+
+int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+		iceland_fan_ctrl_set_default_mode(hwmgr);
+	return 0;
+}
+
+/**
+* Initializes the thermal controller related functions in the Hardware Manager structure.
+* @param hwmgr The address of the hardware manager.
+* @exception Any error code from the low-level communication.
+*/
+int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
+
+	if (0 == result) {
+		result = phm_construct_table(hwmgr,
+				&iceland_thermal_start_thermal_controller_master,
+				&(hwmgr->start_thermal_controller));
+		if (0 != result)
+			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+	}
+
+	if (0 == result)
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	return result;
+}
+
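The two sequences above are table-driven: each phm_master_table_item pairs an optional runtime check with a tf_* callback, and { NULL, NULL } terminates the list. The phm core that consumes the constructed tables is not part of this hunk; assuming it simply walks the list in order and threads the last result code through (which is what the tf_* signatures suggest), the dispatch reduces to roughly the sketch below. phm_run_table_sketch and the tableFunction field name are illustrative, not the real API:

/* Illustrative only: how a table like the ones above would be executed. */
static int phm_run_table_sketch(struct pp_hwmgr *hwmgr,
				const struct phm_master_table_item *item,
				void *input, void *output, void *storage)
{
	int result = 0;

	/* walk the list until the { NULL, NULL } sentinel */
	for (; item->tableFunction != NULL; item++)
		result = item->tableFunction(hwmgr, input, output, storage, result);

	return result;
}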
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
new file mode 100644
index 0000000..267945f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef ICELAND_THERMAL_H
+#define ICELAND_THERMAL_H
+
+#include "hwmgr.h"
+
+#define ICELAND_THERMAL_HIGH_ALERT_MASK         0x1
+#define ICELAND_THERMAL_LOW_ALERT_MASK          0x2
+
+#define ICELAND_THERMAL_MINIMUM_TEMP_READING    -256
+#define ICELAND_THERMAL_MAXIMUM_TEMP_READING    255
+
+#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP      0
+#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP      255
+
+#define FDO_PWM_MODE_STATIC     1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+
+#endif
+
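For orientation, a caller that wants to pin the fan at a fixed duty cycle might combine the entry points declared above as in the hypothetical sketch below. Whether iceland_fan_ctrl_set_static_mode must be called before iceland_fan_ctrl_set_fan_speed_percent, rather than being implied by it, is an assumption here:

/* Hypothetical caller: take manual fan control and pin the fan at 50%. */
static int iceland_pin_fan_at_half(struct pp_hwmgr *hwmgr)
{
	struct phm_fan_speed_info info;
	int ret;

	/* probe what the fan controller supports before forcing a speed */
	ret = iceland_fan_ctrl_get_fan_speed_info(hwmgr, &info);
	if (ret)
		return ret;

	ret = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
	if (ret)
		return ret;

	return iceland_fan_ctrl_set_fan_speed_percent(hwmgr, 50);
}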
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 783dcc3..b691322 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -97,19 +97,6 @@
 #define PCIE_BUS_CLK 10000
 #define TCLK (PCIE_BUS_CLK / 10)
 
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field.
 */
 enum DPM_EVENT_SRC {
 	DPM_EVENT_SRC_ANALOG = 0,
@@ -2772,9 +2759,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-		PHM_PlatformCaps_SclkDeepSleep);
-
-	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_DynamicPatchPowerState);
 
 	if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
@@ -2819,13 +2803,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_TCPRamping);
 
-	if (hwmgr->powercontainment_enabled)
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment);
-	else
-		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_PowerContainment);
-
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_CAC);
 
@@ -2904,8 +2881,8 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 			continue;
 		}
 
-		/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
-		 * real voltage level in unit of 0.01mv */
+		/* need to make sure vddc is less than 2V or else, it could burn the ASIC.
+		 * real voltage level in unit of 0.01mV */
 		PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
 			"Invalid VDDC value", result = -EINVAL;);
 
@@ -3142,7 +3119,10 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 			table_info->vddc_lookup_table;
 	uint32_t i;
 
-	if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+	if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7 &&
+	    ((hwmgr->sub_sys_id == 0xb37 && hwmgr->sub_vendor_id == 0x1002) ||
+	     (hwmgr->sub_sys_id == 0x4a8 && hwmgr->sub_vendor_id == 0x1043) ||
+	     (hwmgr->sub_sys_id == 0x9480 && hwmgr->sub_vendor_id == 0x1682))) {
 		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
 			return 0;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 7dc4afd..7f9ba7f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -301,6 +301,8 @@ void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_DisableMemoryTransition);
 
+	tonga_initialize_power_tune_defaults(hwmgr);
+
 	data->mclk_strobe_mode_threshold = 40000;
 	data->mclk_stutter_mode_threshold = 30000;
 	data->mclk_edc_enable_threshold = 40000;
@@ -2478,7 +2480,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t
 	graphic_level->VoltageDownHyst = 0;
 	graphic_level->PowerThrottle = 0;
 
-	threshold = engine_clock * data->fast_watemark_threshold / 100;
+	threshold = engine_clock * data->fast_watermark_threshold / 100;
 
 	/*
	 * get the DAL clock. do it in the future.
	PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
@@ -2981,6 +2983,10 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(0 == result,
 		"Failed to initialize Boot Level!", return result;);
 
+	result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(result == 0,
+		"Failed to populate BAPM Parameters!", return result);
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_ClockStretcher)) {
 		result = tonga_populate_clock_stretcher_data_table(hwmgr);
@@ -4369,6 +4375,10 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to initialize ARB table index!", result = tmp_result);
 
+	tmp_result = tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+		"Failed to populate PM fuses!", result = tmp_result);
+
 	tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to populate initialize MC Reg table!", result = tmp_result);
@@ -4387,6 +4397,18 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 		"Failed to start DPM!", result = tmp_result);
 
+	tmp_result = tonga_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+		"Failed to enable SMC CAC!", result = tmp_result);
+
+	tmp_result = tonga_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+		"Failed to enable power containment!", result = tmp_result);
+
+	tmp_result = tonga_power_control_set_level(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+		"Failed to set power control level!", result = tmp_result);
+
 	return result;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 3961884..fcad942 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -300,6 +300,7 @@ struct tonga_hwmgr {
 	bool dll_defaule_on;
 	bool performance_request_registered;
 
+	/* ----------------- Low Power Features ---------------------*/
 	phw_tonga_bacos bacos;
 	phw_tonga_ulv_parm ulv;
 
@@ -314,10 +315,14 @@ struct tonga_hwmgr {
 	bool enable_tdc_limit_feature;
 	bool enable_pkg_pwr_tracking_feature;
 	bool disable_uvd_power_tune_feature;
-	phw_tonga_pt_defaults *power_tune_defaults;
+	struct tonga_pt_defaults *power_tune_defaults;
 	SMU72_Discrete_PmFuses power_tune_table;
-	uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
-	uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
+	uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
+	uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
+
+
+	bool enable_dte_feature;
+
 	/* ----------------- Phase Shedding ---------------------*/
 	bool vddc_phase_shed_control;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
new file mode 100644
index 0000000..9496ade
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "tonga_hwmgr.h"
+#include "tonga_powertune.h"
+#include "tonga_smumgr.h"
+#include "smu72_discrete.h"
+#include "pp_debug.h"
+#include "tonga_ppsmc.h"
+
+#define VOLTAGE_SCALE  4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+
+struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t tmp = 0;
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		tonga_hwmgr->power_tune_defaults =
+				&tonga_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+
+	/* Assume disabled */
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TCPRamping);
+
+	tonga_hwmgr->dte_tj_offset = tmp;
+
+	if (!tmp) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_CAC);
+
+		tonga_hwmgr->fast_watermark_threshold = 100;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_PowerContainment)) {
+			tmp = 1;
+			tonga_hwmgr->enable_dte_feature = tmp ? false : true;
+			tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+			tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ?
true : false;
+		}
+	}
+}
+
+
+int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+	SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	int i, j, k;
+	uint16_t *pdef1;
+	uint16_t *pdef2;
+
+
+	/* The number of TDP fraction bits was changed from 8 to 7 for Fiji,
+	 * as requested by the SMC team
+	 */
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+			(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+			"Target Operating Temp is out of Range!",
+			);
+
+	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+	pdef1 = defaults->bapmti_r;
+	pdef2 = defaults->bapmti_rc;
+
+	for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU72_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+
+	data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+	data->power_tune_table.SviLoadLineTrimVddC = 3;
+	data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+
+	/* The number of TDC fraction bits was changed from 8 to 7
+	 * for Fiji, as requested by the SMC team
+	 */
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+	data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
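The "* 256" above is an 8.8 fixed-point packing of the TDP/TDC watt values before PP_HOST_TO_SMC_US converts them to the SMC's big-endian byte order (the comments note that Fiji later moved to 7 fraction bits, but the value written here carries 8). A standalone sketch with an invented 150 W TDP; modeling PP_HOST_TO_SMC_US as a plain 16-bit byte swap is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

/* stand-in for PP_HOST_TO_SMC_US: a 16-bit byte swap to big-endian */
static uint16_t host_to_smc_us(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t us_tdp = 150;				/* watts, hypothetical */
	uint16_t fixed = (uint16_t)(us_tdp * 256);	/* 8.8 fixed point: 150.0 W -> 0x9600 */

	/* prints fixed=0x9600 swapped=0x0096 */
	printf("fixed=0x%04x swapped=0x%04x\n",
	       (unsigned)fixed, (unsigned)host_to_smc_us(fixed));
	return 0;
}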
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
+	uint32_t temp;
+
+	if (tonga_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, data->sram_end))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+				return -EINVAL);
+	else
+		data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.
+			usFanOutputSensitivity & (1 << 15)) ||
+	    (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+		hwmgr->thermal_controller.advanceFanControlParameters.
+				usFanOutputSensitivity = hwmgr->thermal_controller.
+				advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	data->power_tune_table.FuzzyFan_PwmSetDelta =
+			PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+				advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (tonga_read_smc_sram_dword(hwmgr->smumgr,
+				SMU72_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU72_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		/* DW6 */
+		if (tonga_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+		/* DW7 */
+		if (tonga_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+		/* DW8 */
+		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl Failed!",
+					return -EINVAL);
+
+		/* DW9-DW12 */
+		if (tonga_populate_temperature_scaler(hwmgr) != 0)
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		/* DW13-DW14 */
+		if (tonga_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		/* DW15-DW18 */
+		if (tonga_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		/* DW19 */
+		if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU72_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (smc_result == 0) ? true : false; + } + return result; +} + +int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + +int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int tonga_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int smc_result; + int result = 0; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->enable_dte_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable DTE in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; + } + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (smc_result == 0) { + struct phm_cac_tdp_table *cac_table = + table_info->cac_dtp_table; + uint32_t default_limit = + 
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+				if (tonga_set_power_limit(hwmgr, default_limit))
+					printk(KERN_ERR "Failed to set Default Power Limit in SMC!\n");
+			}
+		}
+	}
+	return result;
+}
+
+int tonga_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment) &&
+	    data->power_containment_features) {
+		int smc_result;
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_TDCLimit) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_TDCLimitDisable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable TDCLimit in SMC.",
+					result = smc_result);
+		}
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_DTE) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_DisableDTE));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable DTE in SMC.",
+					result = smc_result);
+		}
+
+		if (data->power_containment_features &
+				POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+			PP_ASSERT_WITH_CODE((smc_result == 0),
+					"Failed to disable PkgPwrTracking in SMC.",
+					result = smc_result);
+		}
+		data->power_containment_features = 0;
+	}
+
+	return result;
+}
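The overdrive adjustment in tonga_power_control_set_level below is plain integer math on the same 8.8 fixed-point encoding. A worked sketch with a hypothetical 100 W TDP and a +10% adjustment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t us_tdp = 100;		/* watts, hypothetical */
	int adjust_percent = 10;	/* +10% overdrive, hypothetical */

	/* same expression as in the function below */
	int target_tdp = ((100 + adjust_percent) * (int)(us_tdp * 256)) / 100;

	/* prints target_tdp=28160 (0x6e00 = 110.0 W in 8.8 fixed point) */
	printf("target_tdp=%d (0x%x)\n", target_tdp, target_tdp);
	return 0;
}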
+
+int tonga_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+	int adjust_percent, target_tdp;
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		/* adjustment percentage has already been validated */
+		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+				hwmgr->platform_descriptor.TDPAdjustment :
+				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
+		/* The SMC expects target_tdp as a 7-bit fraction in the DPM
+		 * table, but as an 8-bit fraction in messages
+		 */
+		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+		result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+	}
+
+	return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
index 8e6670b..c8bdb92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
@@ -35,20 +35,23 @@ enum _phw_tonga_ptc_config_reg_type {
 typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
 
 /* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+
+
+/* PowerContainment Features */
 #define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
-struct _phw_tonga_pt_config_reg {
+struct tonga_pt_config_reg {
 	uint32_t Offset;
 	uint32_t Mask;
 	uint32_t Shift;
 	uint32_t Value;
 	phw_tonga_ptc_config_reg_type Type;
 };
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
 
-struct _phw_tonga_pt_defaults {
+struct tonga_pt_defaults {
 	uint8_t svi_load_line_en;
 	uint8_t svi_load_line_vddC;
 	uint8_t tdc_vddc_throttle_release_limit_perc;
@@ -60,7 +63,18 @@ struct _phw_tonga_pt_defaults {
 	uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 	uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
 };
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
+
+
+
+void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
+int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
+int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr);
+int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int tonga_enable_power_containment(struct pp_hwmgr *hwmgr);
+int tonga_disable_power_containment(struct pp_hwmgr *hwmgr);
+int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int tonga_power_control_set_level(struct pp_hwmgr *hwmgr);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index b764c8c..3f8172f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -132,8 +132,10 @@ struct amd_pp_init {
 	uint32_t chip_family;
 	uint32_t chip_id;
 	uint32_t rev_id;
-	bool powercontainment_enabled;
+	uint16_t sub_sys_id;
+	uint16_t sub_vendor_id;
 };
+
 enum amd_pp_display_config_type{
 	AMD_PP_DisplayConfigType_None = 0,
 	AMD_PP_DisplayConfigType_DP54 ,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bf0d2ac..36b4ec9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -41,6 +41,9 @@ struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+
 enum DISPLAY_GAP {
 	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
 	DISPLAY_GAP_VBLANK = 1,         /* Wait for vblank.
*/ @@ -614,7 +617,6 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; - bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; @@ -637,16 +639,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index); -extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value); extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, @@ -654,12 +647,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t value, uint32_t mask); -extern void phm_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask); + extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr); @@ -697,43 +685,7 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_FIELD_SHIFT(reg, field)) -#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_on_register(hwmgr, index, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_for_register_unequal(hwmgr, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask) -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask) - -/* Operations on named registers. */ - -#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) /* Operations on named fields. 
*/ @@ -762,60 +714,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) -#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) -#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ +#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -/* Operations on arrays of registers & fields. */ - -#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \ - cgs_read_register(device, mm##reg + (offset)) - -#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \ - cgs_write_register(device, mm##reg + (offset), value) - -#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \ - PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field) - -#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \ - PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \ - reg, field, fieldvalue)) - -#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h new file mode 100644 index 0000000..71c9b2d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h @@ -0,0 +1,510 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU71_H +#define SMU71_H + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__VARIANT__ICELAND 1 +#define SMU__DGPU_ONLY 1 +#define SMU__DYNAMIC_MCARB_SETTINGS 1 + +enum SID_OPTION { + SID_OPTION_HI, + SID_OPTION_LO, + SID_OPTION_COUNT +}; + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU71_MAX_LEVELS_VDDC 8 +#define SMU71_MAX_LEVELS_VDDCI 4 +#define SMU71_MAX_LEVELS_MVDD 4 +#define SMU71_MAX_LEVELS_VDDNB 8 + +#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU71_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT) +#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3 +#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT) +#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6 +#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT) +#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9 +#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT) +#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12 +#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT) +#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15 +#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT) +#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18 +#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT) +#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21 +#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT) +#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24 +#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT) +#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27 +#define SCRATCH_B_CURR_SAMU_INDEX_MASK 
(0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) + + +#if defined SMU__DGPU_ONLY +#define SMU71_DTE_ITERATIONS 5 +#define SMU71_DTE_SOURCES 3 +#define SMU71_DTE_SINKS 1 +#define SMU71_NUM_CPU_TES 0 +#define SMU71_NUM_GPU_TES 1 +#define SMU71_NUM_NON_TES 2 + +#endif + +#if defined SMU__FUSION_ONLY +#define SMU7_DTE_ITERATIONS 5 +#define SMU7_DTE_SOURCES 5 +#define SMU7_DTE_SINKS 3 +#define SMU7_NUM_CPU_TES 2 +#define SMU7_NUM_GPU_TES 1 +#define SMU7_NUM_NON_TES 2 + +#endif + +struct SMU71_PIDController +{ + uint32_t Ki; + int32_t LFWindupUpperLim; + int32_t LFWindupLowerLim; + uint32_t StatePrecision; + uint32_t LfPrecision; + uint32_t LfOffset; + uint32_t MaxState; + uint32_t MaxLfFraction; + uint32_t StateShift; +}; + +typedef struct SMU71_PIDController SMU71_PIDController; + +struct SMU7_LocalDpmScoreboard +{ + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfSclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t GfxClkSlow; + uint8_t GpioClampMode; + + uint8_t FpsFilterWeight; + uint8_t EnabledLevelsChange; + uint8_t DteClampMode; + uint8_t FpsClampMode; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint8_t FpsEnabled; + uint8_t MaxPerfLevel; + uint8_t AllowLowClkInterruptToHost; + uint8_t FpsRunning; + + uint32_t MaxAllowedFrequency; +}; + +typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard; + +#define SMU7_MAX_VOLTAGE_CLIENTS 12 + +struct SMU7_VoltageScoreboard +{ + uint16_t CurrentVoltage; + uint16_t HighestVoltage; + uint16_t MaxVid; + uint8_t HighestVidOffset; + uint8_t CurrentVidOffset; +#if defined (SMU__DGPU_ONLY) + uint8_t CurrentPhases; + uint8_t HighestPhases; +#else + uint8_t AvsOffset; + uint8_t AvsOffsetApplied; +#endif + uint8_t ControllerBusy; + uint8_t CurrentVid; + uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS]; +#if defined (SMU__DGPU_ONLY) + uint8_t RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS]; +#endif + uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS]; + uint8_t TargetIndex; + uint8_t Delay; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint16_t CurrentStdVoltageHiSidd; + uint16_t CurrentStdVoltageLoSidd; +#if defined (SMU__DGPU_ONLY) + uint16_t RequestedVddci; + uint16_t CurrentVddci; + uint16_t HighestVddci; + uint8_t CurrentVddciVid; + uint8_t TargetVddciIndex; +#endif +}; + +typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard; + +// ------------------------------------------------------------------------------------------------------------------------- +#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */ + +struct SMU7_PCIeLinkSpeedScoreboard +{ + uint8_t DpmEnable; + uint8_t DpmRunning; + uint8_t DpmForce; + uint8_t DpmForceLevel; + + uint8_t CurrentLinkSpeed; + uint8_t EnabledLevelsChange; + uint16_t AutoDpmInterval; + + uint16_t 
AutoDpmRange; + uint16_t AutoDpmCount; + + uint8_t DpmMode; + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t CurrentLinkLevel; + +}; + +typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard; + +// -------------------------------------------------------- CAC table ------------------------------------------------------ +#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16 + +#define SMU7_SCALE_I 7 +#define SMU7_SCALE_R 12 + +struct SMU7_PowerScoreboard +{ + uint16_t MinVoltage; + uint16_t MaxVoltage; + + uint32_t AvgGpuPower; + + uint16_t VddcLeakagePower[SID_OPTION_COUNT]; + uint16_t VddcSclkConstantPower[SID_OPTION_COUNT]; + uint16_t VddcSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcNonSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcTotalPower[SID_OPTION_COUNT]; + uint16_t VddcTotalCurrent[SID_OPTION_COUNT]; + uint16_t VddcLoadVoltage[SID_OPTION_COUNT]; + uint16_t VddcNoLoadVoltage[SID_OPTION_COUNT]; + + uint16_t DisplayPhyPower; + uint16_t PciePhyPower; + + uint16_t VddciTotalPower; + uint16_t Vddr1TotalPower; + + uint32_t RocPower; + + uint32_t last_power; + uint32_t enableWinAvg; + + uint32_t lkg_acc; + uint16_t VoltLkgeScaler; + uint16_t TempLkgeScaler; + + uint32_t uvd_cac_dclk; + uint32_t uvd_cac_vclk; + uint32_t vce_cac_eclk; + uint32_t samu_cac_samclk; + uint32_t display_cac_dispclk; + uint32_t acp_cac_aclk; + uint32_t unb_cac; + + uint32_t WinTime; + + uint16_t GpuPwr_MAWt; + uint16_t FilteredVddcTotalPower; + + uint8_t CalculationRepeats; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; +}; + +typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; + +// -------------------------------------------------------------------------------------------------- + +struct SMU7_ThermalScoreboard +{ + int16_t GpuLimit; + int16_t GpuHyst; + uint16_t CurrGnbTemp; + uint16_t FilteredGnbTemp; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; + uint8_t padding[3]; +}; + +typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard; + +// For FeatureEnables: +#define SMU7_SCLK_DPM_CONFIG_MASK 0x01 +#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02 +#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04 +#define SMU7_MCLK_DPM_CONFIG_MASK 0x08 +#define SMU7_UVD_DPM_CONFIG_MASK 0x10 +#define SMU7_VCE_DPM_CONFIG_MASK 0x20 +#define SMU7_ACP_DPM_CONFIG_MASK 0x40 +#define SMU7_SAMU_DPM_CONFIG_MASK 0x80 +#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100 + +#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001 +#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002 +#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100 +#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200 +#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 +#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 + +// All 'soft registers' should be uint32_t. 
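The FeatureEnables word in the SMU71_SoftRegisters block below is decoded with the SMU7_*_CONFIG_MASK bits defined above. A minimal illustration, not taken from the driver, of testing one of those bits:

/* Illustrative only: decode one feature bit from the soft-register block. */
static int sclk_dpm_enabled(const struct SMU71_SoftRegisters *soft_regs)
{
	return (soft_regs->FeatureEnables & SMU7_SCLK_DPM_CONFIG_MASK) != 0;
}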
+struct SMU71_SoftRegisters +{ + uint32_t RefClockFrequency; + uint32_t PmTimerPeriod; + uint32_t FeatureEnables; +#if defined (SMU__DGPU_ONLY) + uint32_t PreVBlankGap; + uint32_t VBlankTimeout; + uint32_t TrainTimeGap; + uint32_t MvddSwitchTime; + uint32_t LongestAcpiTrainTime; + uint32_t AcpiDelay; + uint32_t G5TrainTime; + uint32_t DelayMpllPwron; + uint32_t VoltageChangeTimeout; +#endif + uint32_t HandshakeDisables; + + uint8_t DisplayPhy1Config; + uint8_t DisplayPhy2Config; + uint8_t DisplayPhy3Config; + uint8_t DisplayPhy4Config; + + uint8_t DisplayPhy5Config; + uint8_t DisplayPhy6Config; + uint8_t DisplayPhy7Config; + uint8_t DisplayPhy8Config; + + uint32_t AverageGraphicsActivity; + uint32_t AverageMemoryActivity; + uint32_t AverageGioActivity; + + uint8_t SClkDpmEnabledLevels; + uint8_t MClkDpmEnabledLevels; + uint8_t LClkDpmEnabledLevels; + uint8_t PCIeDpmEnabledLevels; + + uint32_t DRAM_LOG_ADDR_H; + uint32_t DRAM_LOG_ADDR_L; + uint32_t DRAM_LOG_PHY_ADDR_H; + uint32_t DRAM_LOG_PHY_ADDR_L; + uint32_t DRAM_LOG_BUFF_SIZE; + uint32_t UlvEnterCount; + uint32_t UlvTime; + uint32_t UcodeLoadStatus; + uint8_t DPMFreezeAndForced; + uint8_t Activity_Weight; + uint8_t Reserved8[2]; + uint32_t Reserved; +}; + +typedef struct SMU71_SoftRegisters SMU71_SoftRegisters; + +struct SMU71_Firmware_Header +{ + uint32_t Digest[5]; + uint32_t Version; + uint32_t HeaderSize; + uint32_t Flags; + uint32_t EntryPoint; + uint32_t CodeSize; + uint32_t ImageSize; + + uint32_t Rtos; + uint32_t SoftRegisters; + uint32_t DpmTable; + uint32_t FanTable; + uint32_t CacConfigTable; + uint32_t CacStatusTable; + + uint32_t mcRegisterTable; + + uint32_t mcArbDramTimingTable; + + uint32_t PmFuseTable; + uint32_t Globals; + uint32_t UvdDpmTable; + uint32_t AcpDpmTable; + uint32_t VceDpmTable; + uint32_t SamuDpmTable; + uint32_t UlvSettings; + uint32_t Reserved[37]; + uint32_t Signature; +}; + +typedef struct SMU71_Firmware_Header SMU71_Firmware_Header; + +struct SMU7_HystController_Data +{ + uint8_t waterfall_up; + uint8_t waterfall_down; + uint8_t pstate; + uint8_t clamp_mode; +}; + +typedef struct SMU7_HystController_Data SMU7_HystController_Data; + +#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000 + +enum DisplayConfig { + PowerDown = 1, + DP54x4, + DP54x2, + DP54x1, + DP27x4, + DP27x2, + DP27x1, + HDMI297, + HDMI162, + LVDS, + DP324x4, + DP324x2, + DP324x1 +}; + +//#define SX_BLOCK_COUNT 8 +//#define MC_BLOCK_COUNT 1 +//#define CPL_BLOCK_COUNT 27 + +#if defined SMU__VARIANT__ICELAND + #define SX_BLOCK_COUNT 8 + #define MC_BLOCK_COUNT 1 + #define CPL_BLOCK_COUNT 29 +#endif + +struct SMU7_Local_Cac { + uint8_t BlockId; + uint8_t SignalId; + uint8_t Threshold; + uint8_t Padding; +}; + +typedef struct SMU7_Local_Cac SMU7_Local_Cac; + +struct SMU7_Local_Cac_Table { + SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT]; + SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT]; + SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT]; +}; + +typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h new file mode 100644 index 0000000..c0e3936 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h @@ -0,0 +1,631 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU71_DISCRETE_H +#define SMU71_DISCRETE_H + +#include "smu71.h" + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define VDDC_ON_SVI2 0x1 +#define VDDCI_ON_SVI2 0x2 +#define MVDD_ON_SVI2 0x4 + +struct SMU71_Discrete_VoltageLevel +{ + uint16_t Voltage; + uint16_t StdVoltageHiSidd; + uint16_t StdVoltageLoSidd; + uint8_t Smio; + uint8_t padding; +}; + +typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel; + +struct SMU71_Discrete_GraphicsLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + + uint32_t SclkFrequency; + + uint8_t pcieDpmLevel; + uint8_t DeepSleepDivId; + uint16_t ActivityLevel; + + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t EnabledForActivity; + uint8_t EnabledForThrottle; + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t PowerThrottle; +}; + +typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel; + +struct SMU71_Discrete_ACPILevel +{ + uint32_t Flags; + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t SclkFrequency; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t DeepSleepDivId; + uint8_t padding; + uint32_t CgSpllFuncCntl; + uint32_t CgSpllFuncCntl2; + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; +}; + +typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel; + +struct SMU71_Discrete_Ulv +{ + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint16_t VddcOffset; + uint8_t VddcOffsetVid; + uint8_t VddcPhase; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv; + +struct SMU71_Discrete_MemoryLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t MinVddci; + uint32_t MinMvdd; + + uint32_t MclkFrequency; + + uint8_t EdcReadEnable; + uint8_t EdcWriteEnable; + uint8_t RttEnable; + uint8_t StutterEnable; + + uint8_t StrobeEnable; + uint8_t StrobeRatio; + uint8_t EnabledForThrottle; + uint8_t EnabledForActivity; + + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t padding; + + uint16_t ActivityLevel; + uint8_t DisplayWatermark; + uint8_t padding1; + + uint32_t MpllFuncCntl; + uint32_t MpllFuncCntl_1; + uint32_t MpllFuncCntl_2; + 
uint32_t MpllAdFuncCntl; + uint32_t MpllDqFuncCntl; + uint32_t MclkPwrmgtCntl; + uint32_t DllCntl; + uint32_t MpllSs1; + uint32_t MpllSs2; +}; + +typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel; + +struct SMU71_Discrete_LinkLevel +{ + uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 + uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint8_t EnabledForActivity; + uint8_t SPC; + uint32_t DownThreshold; + uint32_t UpThreshold; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel; + + +#ifdef SMU__DYNAMIC_MCARB_SETTINGS +// MC ARB DRAM Timing registers. +struct SMU71_Discrete_MCArbDramTimingTableEntry +{ + uint32_t McArbDramTiming; + uint32_t McArbDramTiming2; + uint8_t McArbBurstTime; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry; + +struct SMU71_Discrete_MCArbDramTimingTable +{ + SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable; +#endif + +// UVD VCLK/DCLK state (level) definition. +struct SMU71_Discrete_UvdLevel +{ + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint16_t MinVddc; + uint8_t MinVddcPhases; + uint8_t VclkDivider; + uint8_t DclkDivider; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel; + +// Clocks for other external blocks (VCE, ACP, SAMU). +struct SMU71_Discrete_ExtClkLevel +{ + uint32_t Frequency; + uint16_t MinVoltage; + uint8_t MinPhases; + uint8_t Divider; +}; + +typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel; + +// Everything that we need to keep track of about the current state. +// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters +// that need to be checked later. +// We don't need to cache everything about a state, just a few parameters. 
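As the comment above says, the struct below caches only the parameters that later comparisons actually need, instead of whole GraphicsLevel/MemoryLevel copies. An illustrative helper (the function name is invented) showing how such a cache would be filled:

/* Illustrative only: cache the cheap-to-compare subset of a state. */
static void cache_state_info(SMU71_Discrete_StateInfo *info,
			     const SMU71_Discrete_GraphicsLevel *gfx,
			     const SMU71_Discrete_MemoryLevel *mem)
{
	info->SclkFrequency = gfx->SclkFrequency;
	info->DisplayWatermark = gfx->DisplayWatermark;
	info->SclkDid = gfx->SclkDid;
	info->MclkFrequency = mem->MclkFrequency;
}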
+struct SMU71_Discrete_StateInfo +{ + uint32_t SclkFrequency; + uint32_t MclkFrequency; + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint32_t SamclkFrequency; + uint32_t AclkFrequency; + uint32_t EclkFrequency; + uint16_t MvddVoltage; + uint16_t padding16; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + uint8_t McRegIndex; + uint8_t SeqIndex; + uint8_t SclkDid; + int8_t SclkIndex; + int8_t MclkIndex; + uint8_t PCIeGen; + +}; + +typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo; + + +struct SMU71_Discrete_DpmTable +{ + // Multi-DPM controller settings + SMU71_PIDController GraphicsPIDController; + SMU71_PIDController MemoryPIDController; + SMU71_PIDController LinkPIDController; + + uint32_t SystemFlags; + + // SMIO masks for voltage and phase controls + uint32_t SmioMaskVddcVid; + uint32_t SmioMaskVddcPhase; + uint32_t SmioMaskVddciVid; + uint32_t SmioMaskMvddVid; + + uint32_t VddcLevelCount; + uint32_t VddciLevelCount; + uint32_t MvddLevelCount; + + SMU71_Discrete_VoltageLevel VddcLevel [SMU71_MAX_LEVELS_VDDC]; + SMU71_Discrete_VoltageLevel VddciLevel [SMU71_MAX_LEVELS_VDDCI]; + SMU71_Discrete_VoltageLevel MvddLevel [SMU71_MAX_LEVELS_MVDD]; + + uint8_t GraphicsDpmLevelCount; + uint8_t MemoryDpmLevelCount; + uint8_t LinkLevelCount; + uint8_t MasterDeepSleepControl; + + uint32_t Reserved[5]; + + // State table entries for each DPM state + SMU71_Discrete_GraphicsLevel GraphicsLevel [SMU71_MAX_LEVELS_GRAPHICS]; + SMU71_Discrete_MemoryLevel MemoryACPILevel; + SMU71_Discrete_MemoryLevel MemoryLevel [SMU71_MAX_LEVELS_MEMORY]; + SMU71_Discrete_LinkLevel LinkLevel [SMU71_MAX_LEVELS_LINK]; + SMU71_Discrete_ACPILevel ACPILevel; + + uint32_t SclkStepSize; + uint32_t Smio [SMU71_MAX_ENTRIES_SMIO]; + + uint8_t GraphicsBootLevel; + uint8_t GraphicsVoltageChangeEnable; + uint8_t GraphicsThermThrottleEnable; + uint8_t GraphicsInterval; + + uint8_t VoltageInterval; + uint8_t ThermalInterval; + uint16_t TemperatureLimitHigh; + + uint16_t TemperatureLimitLow; + uint8_t MemoryBootLevel; + uint8_t MemoryVoltageChangeEnable; + + uint8_t MemoryInterval; + uint8_t MemoryThermThrottleEnable; + uint8_t MergedVddci; + uint8_t padding2; + + uint16_t VoltageResponseTime; + uint16_t PhaseResponseTime; + + uint8_t PCIeBootLinkLevel; + uint8_t PCIeGenInterval; + uint8_t DTEInterval; + uint8_t DTEMode; + + uint8_t SVI2Enable; + uint8_t VRHotGpio; + uint8_t AcDcGpio; + uint8_t ThermGpio; + + uint32_t DisplayCac; + + uint16_t MaxPwr; + uint16_t NomPwr; + + uint16_t FpsHighThreshold; + uint16_t FpsLowThreshold; + + uint16_t BAPMTI_R [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + uint16_t BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + + uint8_t DTEAmbientTempBase; + uint8_t DTETjOffset; + uint8_t GpuTjMax; + uint8_t GpuTjHyst; + + uint16_t BootVddc; + uint16_t BootVddci; + + uint16_t BootMVdd; + uint16_t padding; + + uint32_t BAPM_TEMP_GRADIENT; + + uint32_t LowSclkInterruptThreshold; + uint32_t VddGfxReChkWait; + + uint16_t PPM_PkgPwrLimit; + uint16_t PPM_TemperatureLimit; + + uint16_t DefaultTdp; + uint16_t TargetTdp; +}; + +typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable; + +// --------------------------------------------------- AC Timing Parameters ------------------------------------------------ +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16 +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY + +struct SMU71_Discrete_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct 
SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress; + +struct SMU71_Discrete_MCRegisterSet +{ + uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet; + +struct SMU71_Discrete_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMU71_Discrete_MCRegisterAddress address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; + SMU71_Discrete_MCRegisterSet data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters; + + +// --------------------------------------------------- Fan Table ----------------------------------------------------------- +struct SMU71_Discrete_FanTable +{ + uint16_t FdoMode; + int16_t TempMin; + int16_t TempMed; + int16_t TempMax; + int16_t Slope1; + int16_t Slope2; + int16_t FdoMin; + int16_t HystUp; + int16_t HystDown; + int16_t HystSlope; + int16_t TempRespLim; + int16_t TempCurr; + int16_t SlopeCurr; + int16_t PwmCurr; + uint32_t RefreshPeriod; + int16_t FdoMax; + uint8_t TempSrc; + int8_t Padding; +}; + +typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable; + +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4 +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG) + +struct SMU71_MclkDpmScoreboard +{ + + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfMclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t MclkSwitchInProgress; + uint8_t MclkSwitchCritical; + + uint8_t TargetMclkIndex; + uint8_t TargetMvddIndex; + uint8_t MclkSwitchResult; + + uint8_t EnabledLevelsChange; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint16_t MclkSwitchingTime; + uint8_t padding[2]; +}; + +typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard; + +struct SMU71_UlvScoreboard +{ + uint8_t EnterUlv; + uint8_t ExitUlv; + uint8_t UlvActive; + uint8_t WaitingForUlv; + uint8_t UlvEnable; + uint8_t UlvRunning; + uint8_t UlvMasterEnable; + uint8_t padding; + uint32_t UlvAbortedCount; + uint32_t UlvTimeStamp; +}; + +typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard; + +struct SMU71_VddGfxScoreboard +{ + uint8_t VddGfxEnable; + uint8_t VddGfxActive; + uint8_t padding[2]; + + uint32_t VddGfxEnteredCount; + uint32_t VddGfxAbortedCount; +}; + +typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard; + +struct SMU71_AcpiScoreboard { + uint32_t SavedInterruptMask[2]; + uint8_t LastACPIRequest; + uint8_t CgBifResp; + uint8_t RequestType; + uint8_t Padding; + SMU71_Discrete_ACPILevel D0Level; +}; + +typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard; + + +struct SMU71_Discrete_PmFuses { + // dw0-dw1 + uint8_t BapmVddCVidHiSidd[8]; + + // dw2-dw3 + uint8_t BapmVddCVidLoSidd[8]; + + // dw4-dw5 + uint8_t VddCVid[8]; + + // dw6 
+ uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t SviLoadLineTrimVddC; + uint8_t SviLoadLineOffsetVddC; + + // dw7 + uint16_t TDC_VDDC_PkgLimit; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + + // dw8 + uint8_t TdcWaterfallCtl; + uint8_t LPMLTemperatureMin; + uint8_t LPMLTemperatureMax; + uint8_t Reserved; + + // dw9-dw12 + uint8_t LPMLTemperatureScaler[16]; + + // dw13-dw14 + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t Reserved6; + + // dw15 + uint8_t GnbLPML[16]; + + // dw15 + uint8_t GnbLPMLMaxVid; + uint8_t GnbLPMLMinVid; + uint8_t Reserved1[2]; + + // dw16 + uint16_t BapmVddCBaseLeakageHiSidd; + uint16_t BapmVddCBaseLeakageLoSidd; +}; + +typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses; + +struct SMU71_Discrete_Log_Header_Table { + uint32_t version; + uint32_t asic_id; + uint16_t flags; + uint16_t entry_size; + uint32_t total_size; + uint32_t num_of_entries; + uint8_t type; + uint8_t mode; + uint8_t filler_0[2]; + uint32_t filler_1[2]; +}; + +typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table; + +struct SMU71_Discrete_Log_Cntl { + uint8_t Enabled; + uint8_t Type; + uint8_t padding[2]; + uint32_t BufferSize; + uint32_t SamplesLogged; + uint32_t SampleSize; + uint32_t AddrL; + uint32_t AddrH; +}; + +typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl; + +#if defined SMU__DGPU_ONLY + #define CAC_ACC_NW_NUM_OF_SIGNALS 83 +#endif + + +struct SMU71_Discrete_Cac_Collection_Table { + uint32_t temperature; + uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS]; + uint32_t filler[4]; +}; + +typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table; + +struct SMU71_Discrete_Cac_Verification_Table { + uint32_t VddcTotalPower; + uint32_t VddcLeakagePower; + uint32_t VddcConstantPower; + uint32_t VddcGfxDynamicPower; + uint32_t VddcUvdDynamicPower; + uint32_t VddcVceDynamicPower; + uint32_t VddcAcpDynamicPower; + uint32_t VddcPcieDynamicPower; + uint32_t VddcDceDynamicPower; + uint32_t VddcCurrent; + uint32_t VddcVoltage; + uint32_t VddciTotalPower; + uint32_t VddciLeakagePower; + uint32_t VddciConstantPower; + uint32_t VddciDynamicPower; + uint32_t Vddr1TotalPower; + uint32_t Vddr1LeakagePower; + uint32_t Vddr1ConstantPower; + uint32_t Vddr1DynamicPower; + uint32_t spare[8]; + uint32_t temperature; +}; + +typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index f10fb64..19e7946 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,7 +2,8 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ + polaris10_smumgr.o iceland_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c new file mode 100644 index 0000000..f506583 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -0,0 +1,713 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/gfp.h> + +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "ppsmc.h" +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" +#include "cgs_common.h" + +#define ICELAND_SMC_SIZE 0x20000 +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /*128 *1024*/ + +/** + * Set the address for reading/writing the SMC SRAM space. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + */ +static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t limit) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), + "SMC address must be 4 byte aligned.", + return -1;); + + PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), + "SMC address is beyond the SMC RAM area.", + return -1;); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +/** + * Copy bytes from an array into the SMC RAM space. + * + * @param smumgr the address of the powerplay SMU manager. + * @param smcStartAddress the start address in the SMC RAM to copy bytes to. + * @param src the byte array to copy the bytes from. + * @param byteCount the number of bytes to copy. 
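+ * @param limit the upper bound of the writable SMC RAM window; the copy
+ * is refused if smcStartAddress + byteCount would reach it.
+ *
+ * Dwords are packed from the source bytes MSB-first; a trailing run of
+ * one to three bytes is merged into the existing SMC dword with a
+ * read-modify-write cycle so the remaining low-order bytes are preserved.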
+ */
+int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
+		uint32_t smcStartAddress, const uint8_t *src,
+		uint32_t byteCount, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+	PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
+			"SMC address must be 4 byte aligned.",
+			return 0;);
+
+	PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
+			"SMC address is beyond the SMC RAM area.",
+			return 0;);
+
+	addr = smcStartAddress;
+
+	while (byteCount >= 4) {
+		/*
+		 * Bytes are written into the
+		 * SMC address space with the MSB first
+		 */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+
+		if (result)
+			goto out;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byteCount -= 4;
+		addr += 4;
+	}
+
+	if (0 != byteCount) {
+		/* Write the remaining 1-3 bytes with a read-modify-write cycle */
+		data = 0;
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = cgs_read_register(smumgr->device,
+				mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byteCount);
+
+		while (byteCount > 0) {
+			data = (data << 8) + *src++;
+			byteCount--;
+		}
+
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = iceland_set_smc_sram_address(smumgr, addr, limit);
+		if (result)
+			goto out;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	return result;
+}
+
+/**
+ * Deassert the reset 'pin' (set it to high).
+ *
+ * @param smumgr the address of the powerplay hardware manager.
+ */
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	return 0;
+}
+
+static void iceland_pp_reset_smc(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMC_SYSCON_RESET_CNTL,
+			rst_reg, 1);
+}
+
+int iceland_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+	static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
+
+	return 0;
+}
+
+/**
+ * Return whether the SMC is currently running.
+ *
+ * @param smumgr the address of the powerplay hardware manager.
+ */
+bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+	uint32_t val1, val2;
+
+	val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+	val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+			ixSMC_PC_C);
+
+	return ((0 == val1) && (0x20100 <= val2));
+}
+
+/**
+ * Send a message to the SMC, and wait for its response.
+ *
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return 0 if the message was sent and acknowledged, otherwise a negative error code.
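+ *
+ * The mailbox handshake is: wait for SMC_RESP_0 to become non-zero
+ * (completion of any previous message), write the new message to
+ * mmSMC_MESSAGE_0, then wait for SMC_RESP_0 to become non-zero again.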
+ */
+static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	if (!iceland_is_smc_ram_running(smumgr))
+		return -EINVAL;
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	PP_ASSERT_WITH_CODE(
+		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
+		"Failed to send Previous Message.",
+		);
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+	PP_ASSERT_WITH_CODE(
+		1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
+		"Failed to send Message.",
+		);
+
+	return 0;
+}
+
+/**
+ * Send a message to the SMC with a parameter.
+ *
+ * @param smumgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send.
+ * @return 0 if the message was sent and acknowledged, otherwise a negative error code.
+ */
+static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+		uint16_t msg, uint32_t parameter)
+{
+	if (smumgr == NULL || smumgr->device == NULL)
+		return -EINVAL;
+
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return iceland_send_msg_to_smc(smumgr, msg);
+}
+
+/*
+ * Read a 32bit value from the SMC SRAM space.
+ * ALL PARAMETERS ARE IN HOST BYTE ORDER.
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param smcAddress the address in the SMC RAM to access.
+ * @param value an output parameter for the data read from the SMC SRAM.
+ */
+int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
+		uint32_t smcAddress, uint32_t *value,
+		uint32_t limit)
+{
+	int result;
+
+	result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
+
+	if (0 != result)
+		return result;
+
+	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
+
+	return 0;
+}
+
+/*
+ * Write a 32bit value to the SMC SRAM space.
+ * ALL PARAMETERS ARE IN HOST BYTE ORDER.
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param smcAddress the address in the SMC RAM to access.
+ * @param value the value to write to the SMC SRAM.
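+ *
+ * Both SRAM accessors first latch the target address into
+ * mmSMC_IND_INDEX_0 (with auto-increment disabled) via
+ * iceland_set_smc_sram_address() and then move the dword through
+ * mmSMC_IND_DATA_0.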
+ */ +int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t value, + uint32_t limit) +{ + int result; + + result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); + + return 0; +} + +static int iceland_smu_fini(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + + if (smumgr->backend != NULL) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + +/** + * Convert the PPIRI firmware type to SMU type mask. + * For MEC, we need to check all MEC related type + */ +static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType) +{ + uint16_t result = 0; + + switch (firmwareType) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + break; + } + + return result; +} + +/** + * Check if the FW has been loaded, + * SMU will not return if loading has not finished. 
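+ * The wait polls SOFT_REGISTERS_TABLE_27 until the mask bits returned by
+ * iceland_get_mask_for_firmware_type() for this firmware are set.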
+*/
+static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
+{
+	uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType);
+
+	if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+			SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) {
+		pr_err("[ powerplay ] check firmware loading failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Populate one firmware image entry in the TOC data structure */
+static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+		uint16_t firmware_type,
+		struct SMU_Entry *pentry)
+{
+	int result;
+	struct cgs_firmware_info info = {0};
+
+	result = cgs_get_firmware_info(
+			smumgr->device,
+			iceland_convert_fw_type_to_cgs(firmware_type),
+			&info);
+
+	if (result == 0) {
+		pentry->version = 0;
+		pentry->id = (uint16_t)firmware_type;
+		pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+		pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+		pentry->meta_data_addr_high = 0;
+		pentry->meta_data_addr_low = 0;
+		pentry->data_size_byte = info.image_size;
+		pentry->num_register_entries = 0;
+
+		if (firmware_type == UCODE_ID_RLC_G)
+			pentry->flags = 1;
+		else
+			pentry->flags = 0;
+	} else {
+		return result;
+	}
+
+	return result;
+}
+
+static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMC_SYSCON_CLOCK_CNTL_0,
+			ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+			SMC_SYSCON_CLOCK_CNTL_0,
+			ck_disable, 0);
+}
+
+int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+	/* set the SMC instruction start point at 0x0 */
+	iceland_program_jump_on_start(smumgr);
+
+	/* enable smc clock */
+	iceland_start_smc_clock(smumgr);
+
+	/* de-assert reset */
+	iceland_start_smc(smumgr);
+
+	SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+			INTERRUPTS_ENABLED, 1);
+
+	return 0;
+}
+
+/**
+ * Upload the SMC firmware to the SMC microcontroller.
+ *
+ * @param smumgr the address of the powerplay hardware manager.
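+ *
+ * The image is validated (4-byte aligned, no larger than ICELAND_SMC_SIZE),
+ * the SMC clock is stopped and the SMC held in reset, and the firmware is
+ * then streamed through mmSMC_IND_DATA_0 with auto-increment enabled.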
+ */ +int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + const uint8_t *src; + uint32_t byte_count, val; + uint32_t data; + struct cgs_firmware_info info = {0}; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + /* load SMC firmware */ + cgs_get_firmware_info(smumgr->device, + iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > ICELAND_SMC_SIZE) { + pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + /* wait for smc boot up */ + SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /* clear firmware interrupt enable flag */ + val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL); + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL, val | 1); + + /* stop smc clock */ + iceland_pp_stop_smc_clock(smumgr); + + /* reset smc */ + iceland_pp_reset_smc(smumgr); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, + info.ucode_start_address); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 1); + + byte_count = info.image_size; + src = (const uint8_t *)info.kptr; + + while (byte_count >= 4) { + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = + (struct iceland_smumgr *)(smumgr->backend); + uint16_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + + toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), 
+ "Failed to Get Firmware Entry.\n", return -1); + + if (!iceland_is_smc_ram_running(smumgr)) { + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + } + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_HI, + iceland_smu->header_buffer.mc_addr_high); + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_LO, + iceland_smu->header_buffer.mc_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + + PP_ASSERT_WITH_CODE( + 0 == iceland_send_msg_to_smc_with_parameter( + smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), + "Fail to Request SMU Load uCode", return 0); + + return result; +} + +static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, + uint32_t firmwareType) +{ + return 0; +} + +static int iceland_start_smu(struct pp_smumgr *smumgr) +{ + int result; + + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + + result = iceland_request_smu_reload_fw(smumgr); + + return result; +} + +/** + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. + */ +static int iceland_smu_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu; + uint64_t mc_addr = 0; + + /* Allocate memory for backend private data */ + iceland_smu = (struct iceland_smumgr *)(smumgr->backend); + iceland_smu->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + + smu_allocate_memory(smumgr->device, + iceland_smu->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &iceland_smu->header_buffer.kaddr, + &iceland_smu->header_buffer.handle); + + iceland_smu->pHeader = iceland_smu->header_buffer.kaddr; + iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)iceland_smu->header_buffer.handle); + return -1); + + return 0; +} + +static const struct pp_smumgr_func iceland_smu_funcs = { + .smu_init = &iceland_smu_init, + .smu_fini = &iceland_smu_fini, + .start_smu = &iceland_start_smu, + .check_fw_load_finish = &iceland_check_fw_load_finish, + .request_smu_load_fw = &iceland_request_smu_reload_fw, + .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, + .send_msg_to_smc = &iceland_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; + +int iceland_smum_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = NULL; + + iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); + + if (iceland_smu == NULL) + return -ENOMEM; + + smumgr->backend = iceland_smu; + smumgr->smumgr_funcs = &iceland_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h 
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h new file mode 100644 index 0000000..62009a7 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -0,0 +1,64 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef _ICELAND_SMUMGR_H_ +#define _ICELAND_SMUMGR_H_ + +struct iceland_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +/* Iceland only has header_buffer, don't have smu buffer. */ +struct iceland_smumgr { + uint8_t *pHeader; + uint8_t *pMecImage; + uint32_t ulSoftRegsStart; + + struct iceland_buffer_entry header_buffer; +}; + +extern int iceland_smum_init(struct pp_smumgr *smumgr); +extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, + const uint8_t *src, + uint32_t byteCount, uint32_t limit); + +extern int iceland_smu_start_smc(struct pp_smumgr *smumgr); + +extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t *value, uint32_t limit); +extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t value, uint32_t limit); + +extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr); +extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5dba7c5..704ff4c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -978,7 +978,7 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) return 0; } -static const struct pp_smumgr_func ellsemere_smu_funcs = { +static const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, .smu_fini = polaris10_smu_fini, .start_smu = polaris10_start_smu, @@ -1001,7 +1001,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr) return -1; smumgr->backend = polaris10_smu; - smumgr->smumgr_funcs = &ellsemere_smu_funcs; + smumgr->smumgr_funcs = &polaris10_smu_funcs; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 7723473..cf3cabe 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -30,6 +30,7 
@@ #include "linux/delay.h" #include "cz_smumgr.h" #include "tonga_smumgr.h" +#include "iceland_smumgr.h" #include "fiji_smumgr.h" #include "polaris10_smumgr.h" @@ -58,6 +59,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { + case CHIP_TOPAZ: + iceland_smum_init(smumgr); + break; case CHIP_TONGA: tonga_smum_init(smumgr); break; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 32715da..efac8ab 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -97,6 +97,83 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) return 0; } +int analogix_dp_enable_psr(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + struct edp_vsc_psr psr_vsc; + + if (!dp->psr_support) + return -EINVAL; + + /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + + psr_vsc.DB0 = 0; + psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID; + + analogix_dp_send_psr_spd(dp, &psr_vsc); + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_enable_psr); + +int analogix_dp_disable_psr(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + struct edp_vsc_psr psr_vsc; + + if (!dp->psr_support) + return -EINVAL; + + /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + + psr_vsc.DB0 = 0; + psr_vsc.DB1 = 0; + + analogix_dp_send_psr_spd(dp, &psr_vsc); + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_disable_psr); + +static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp) +{ + unsigned char psr_version; + + analogix_dp_read_byte_from_dpcd(dp, DP_PSR_SUPPORT, &psr_version); + dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version); + + return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false; +} + +static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp) +{ + unsigned char psr_en; + + /* Disable psr function */ + analogix_dp_read_byte_from_dpcd(dp, DP_PSR_EN_CFG, &psr_en); + psr_en &= ~DP_PSR_ENABLE; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + /* Main-Link transmitter remains active during PSR active states */ + psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + /* Enable psr function */ + psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE | + DP_PSR_CRC_VERIFICATION; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + analogix_dp_enable_psr_crc(dp); +} + static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data) { int i; @@ -921,13 +998,69 @@ static void analogix_dp_commit(struct analogix_dp_device *dp) /* Enable video */ analogix_dp_start_video(dp); + + dp->psr_support = analogix_dp_detect_sink_psr(dp); + if (dp->psr_support) + analogix_dp_enable_sink_psr(dp); +} + +/* + * This function is a bit of a catch-all for panel preparation, hopefully + * simplifying the logic of functions that need to prepare/unprepare the panel + * below. + * + * If @prepare is true, this function will prepare the panel. 
Conversely, if it + * is false, the panel will be unprepared. + * + * If @is_modeset_prepare is true, the function will disregard the current state + * of the panel and either prepare/unprepare the panel based on @prepare. Once + * it finishes, it will update dp->panel_is_modeset to reflect the current state + * of the panel. + */ +static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, + bool prepare, bool is_modeset_prepare) +{ + int ret = 0; + + if (!dp->plat_data->panel) + return 0; + + mutex_lock(&dp->panel_lock); + + /* + * Exit early if this is a temporary prepare/unprepare and we're already + * modeset (since we neither want to prepare twice or unprepare early). + */ + if (dp->panel_is_modeset && !is_modeset_prepare) + goto out; + + if (prepare) + ret = drm_panel_prepare(dp->plat_data->panel); + else + ret = drm_panel_unprepare(dp->plat_data->panel); + + if (ret) + goto out; + + if (is_modeset_prepare) + dp->panel_is_modeset = prepare; + +out: + mutex_unlock(&dp->panel_lock); + return ret; } int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); struct edid *edid = (struct edid *)dp->edid; - int num_modes = 0; + int ret, num_modes = 0; + + ret = analogix_dp_prepare_panel(dp, true, false); + if (ret) { + DRM_ERROR("Failed to prepare panel (%d)\n", ret); + return 0; + } if (analogix_dp_handle_edid(dp) == 0) { drm_mode_connector_update_edid_property(&dp->connector, edid); @@ -940,6 +1073,10 @@ int analogix_dp_get_modes(struct drm_connector *connector) if (dp->plat_data->get_modes) num_modes += dp->plat_data->get_modes(dp->plat_data, connector); + ret = analogix_dp_prepare_panel(dp, false, false); + if (ret) + DRM_ERROR("Failed to unprepare panel (%d)\n", ret); + return num_modes; } @@ -960,11 +1097,23 @@ enum drm_connector_status analogix_dp_detect(struct drm_connector *connector, bool force) { struct analogix_dp_device *dp = to_dp(connector); + enum drm_connector_status status = connector_status_disconnected; + int ret; - if (analogix_dp_detect_hpd(dp)) + ret = analogix_dp_prepare_panel(dp, true, false); + if (ret) { + DRM_ERROR("Failed to prepare panel (%d)\n", ret); return connector_status_disconnected; + } + + if (!analogix_dp_detect_hpd(dp)) + status = connector_status_connected; - return connector_status_connected; + ret = analogix_dp_prepare_panel(dp, false, false); + if (ret) + DRM_ERROR("Failed to unprepare panel (%d)\n", ret); + + return status; } static void analogix_dp_connector_destroy(struct drm_connector *connector) @@ -1035,6 +1184,16 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge) return 0; } +static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct analogix_dp_device *dp = bridge->driver_private; + int ret; + + ret = analogix_dp_prepare_panel(dp, true, true); + if (ret) + DRM_ERROR("failed to setup the panel ret = %d\n", ret); +} + static void analogix_dp_bridge_enable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; @@ -1058,6 +1217,7 @@ static void analogix_dp_bridge_enable(struct drm_bridge *bridge) static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; + int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1077,6 +1237,10 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) pm_runtime_put_sync(dp->dev); + ret = analogix_dp_prepare_panel(dp, false, true); + if (ret) + DRM_ERROR("failed to setup the panel ret = %d\n", ret); + 
dp->dpms_mode = DRM_MODE_DPMS_OFF; } @@ -1165,9 +1329,9 @@ static void analogix_dp_bridge_nop(struct drm_bridge *bridge) } static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { + .pre_enable = analogix_dp_bridge_pre_enable, .enable = analogix_dp_bridge_enable, .disable = analogix_dp_bridge_disable, - .pre_enable = analogix_dp_bridge_nop, .post_disable = analogix_dp_bridge_nop, .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, @@ -1254,6 +1418,9 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; + mutex_init(&dp->panel_lock); + dp->panel_is_modeset = false; + /* * platform dp driver need containor_of the plat_data to get * the driver private data, so we need to store the point of @@ -1333,13 +1500,6 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, phy_power_on(dp->phy); - if (dp->plat_data->panel) { - if (drm_panel_prepare(dp->plat_data->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return -EBUSY; - } - } - analogix_dp_init_dp(dp); ret = devm_request_threaded_irq(&pdev->dev, dp->irq, diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index b456380..473b980 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -177,6 +177,10 @@ struct analogix_dp_device { int hpd_gpio; bool force_hpd; unsigned char edid[EDID_BLOCK_LENGTH * 2]; + bool psr_support; + + struct mutex panel_lock; + bool panel_is_modeset; struct analogix_dp_plat_data *plat_data; }; @@ -278,4 +282,8 @@ int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp); void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp); void analogix_dp_enable_scrambling(struct analogix_dp_device *dp); void analogix_dp_disable_scrambling(struct analogix_dp_device *dp); +void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp); +void analogix_dp_send_psr_spd(struct analogix_dp_device *dp, + struct edp_vsc_psr *vsc); + #endif /* _ANALOGIX_DP_CORE_H */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 48030f0..52c1b6b 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1322,3 +1322,54 @@ void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) reg |= SCRAMBLING_DISABLE; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); } + +void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp) +{ + writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON); +} + +void analogix_dp_send_psr_spd(struct analogix_dp_device *dp, + struct edp_vsc_psr *vsc) +{ + unsigned int val; + + /* don't send info frame */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val &= ~IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + + /* configure single frame update mode */ + writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE, + dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL); + + /* configure VSC HB0~HB3 */ + writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0); + writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1); + writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2); + writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3); + + /* configure reused VSC PB0~PB3, magic number from vendor */ + writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0); 
+ writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1); + writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2); + writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3); + + /* configure DB0 / DB1 values */ + writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0); + writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1); + + /* set reuse spd inforframe */ + val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + val |= REUSE_SPD_EN; + writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + + /* mark info frame update */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = (val | IF_UP) & ~IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + + /* send info frame */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val |= IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); +} diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index cdcc6c5..40200c6 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -22,6 +22,8 @@ #define ANALOGIX_DP_VIDEO_CTL_8 0x3C #define ANALOGIX_DP_VIDEO_CTL_10 0x44 +#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0 0xD8 + #define ANALOGIX_DP_PLL_REG_1 0xfc #define ANALOGIX_DP_PLL_REG_2 0x9e4 #define ANALOGIX_DP_PLL_REG_3 0x9e8 @@ -30,6 +32,21 @@ #define ANALOGIX_DP_PD 0x12c +#define ANALOGIX_DP_IF_TYPE 0x244 +#define ANALOGIX_DP_IF_PKT_DB1 0x254 +#define ANALOGIX_DP_IF_PKT_DB2 0x258 +#define ANALOGIX_DP_SPD_HB0 0x2F8 +#define ANALOGIX_DP_SPD_HB1 0x2FC +#define ANALOGIX_DP_SPD_HB2 0x300 +#define ANALOGIX_DP_SPD_HB3 0x304 +#define ANALOGIX_DP_SPD_PB0 0x308 +#define ANALOGIX_DP_SPD_PB1 0x30C +#define ANALOGIX_DP_SPD_PB2 0x310 +#define ANALOGIX_DP_SPD_PB3 0x314 +#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL 0x318 +#define ANALOGIX_DP_VSC_SHADOW_DB0 0x31C +#define ANALOGIX_DP_VSC_SHADOW_DB1 0x320 + #define ANALOGIX_DP_LANE_MAP 0x35C #define ANALOGIX_DP_ANALOG_CTL_1 0x370 @@ -103,6 +120,8 @@ #define ANALOGIX_DP_SOC_GENERAL_CTL 0x800 +#define ANALOGIX_DP_CRC_CON 0x890 + /* ANALOGIX_DP_TX_SW_RESET */ #define RESET_DP_TX (0x1 << 0) @@ -151,6 +170,7 @@ #define VID_CHK_UPDATE_TYPE_SHIFT (4) #define VID_CHK_UPDATE_TYPE_1 (0x1 << 4) #define VID_CHK_UPDATE_TYPE_0 (0x0 << 4) +#define REUSE_SPD_EN (0x1 << 3) /* ANALOGIX_DP_VIDEO_CTL_8 */ #define VID_HRES_TH(x) (((x) & 0xf) << 4) @@ -167,6 +187,12 @@ #define REF_CLK_27M (0x0 << 0) #define REF_CLK_MASK (0x1 << 0) +/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */ +#define PSR_FRAME_UP_TYPE_BURST (0x1 << 0) +#define PSR_FRAME_UP_TYPE_SINGLE (0x0 << 0) +#define PSR_CRC_SEL_HARDWARE (0x1 << 1) +#define PSR_CRC_SEL_MANUALLY (0x0 << 1) + /* ANALOGIX_DP_LANE_MAP */ #define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6) #define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6) @@ -376,4 +402,12 @@ #define VIDEO_MODE_SLAVE_MODE (0x1 << 0) #define VIDEO_MODE_MASTER_MODE (0x0 << 0) +/* ANALOGIX_DP_PKT_SEND_CTL */ +#define IF_UP (0x1 << 4) +#define IF_EN (0x1 << 0) + +/* ANALOGIX_DP_CRC_CON */ +#define PSR_VID_CRC_FLUSH (0x1 << 2) +#define PSR_VID_CRC_ENABLE (0x1 << 0) + #endif /* _ANALOGIX_DP_REG_H */ diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b95c48ac..a33dab2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1911,14 +1911,23 @@ out: int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_mode_crtc_page_flip *page_flip = data; + struct drm_mode_crtc_page_flip_target *page_flip = data; struct drm_crtc *crtc; struct 
drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; + u32 target_vblank = page_flip->sequence; int ret = -EINVAL; - if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || - page_flip->reserved != 0) + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS) + return -EINVAL; + + if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) + return -EINVAL; + + /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags + * can be specified + */ + if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET) return -EINVAL; if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) @@ -1928,6 +1937,45 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, if (!crtc) return -ENOENT; + if (crtc->funcs->page_flip_target) { + u32 current_vblank; + int r; + + r = drm_crtc_vblank_get(crtc); + if (r) + return r; + + current_vblank = drm_crtc_vblank_count(crtc); + + switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) { + case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE: + if ((int)(target_vblank - current_vblank) > 1) { + DRM_DEBUG("Invalid absolute flip target %u, " + "must be <= %u\n", target_vblank, + current_vblank + 1); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + break; + case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE: + if (target_vblank != 0 && target_vblank != 1) { + DRM_DEBUG("Invalid relative flip target %u, " + "must be 0 or 1\n", target_vblank); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + target_vblank += current_vblank; + break; + default: + target_vblank = current_vblank + + !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); + break; + } + } else if (crtc->funcs->page_flip == NULL || + (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) { + return -EINVAL; + } + drm_modeset_lock_crtc(crtc, crtc->primary); if (crtc->primary->fb == NULL) { /* The framebuffer is currently unbound, presumably @@ -1938,9 +1986,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; } - if (crtc->funcs->page_flip == NULL) - goto out; - fb = drm_framebuffer_lookup(dev, page_flip->fb_id); if (!fb) { ret = -ENOENT; @@ -1981,7 +2026,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } crtc->primary->old_fb = crtc->primary->fb; - ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); + if (crtc->funcs->page_flip_target) + ret = crtc->funcs->page_flip_target(crtc, fb, e, + page_flip->flags, + target_vblank); + else + ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) drm_event_cancel_free(dev, &e->base); @@ -1994,6 +2044,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } out: + if (ret) + drm_crtc_vblank_put(crtc); if (fb) drm_framebuffer_unreference(fb); if (crtc->primary->old_fb) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 6307546..b97c421 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -227,6 +227,7 @@ static int drm_getstats(struct drm_device *dev, void *data, static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_get_cap *req = data; + struct drm_crtc *crtc; req->value = 0; switch (req->capability) { @@ -253,6 +254,13 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_PAGE_FLIP_TARGET: + req->value = 1; + drm_for_each_crtc(crtc, dev) { + if (!crtc->funcs->page_flip_target) + req->value = 
0; + } + break; case DRM_CAP_CURSOR_WIDTH: if (dev->mode_config.cursor_width) req->value = dev->mode_config.cursor_width; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index dda724f..a7da246 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -3,12 +3,16 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror +subdir-ccflags-y += \ + $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) # Please keep these build lists sorted! # core driver code i915-y := i915_drv.o \ i915_irq.o \ + i915_memcpy.o \ + i915_mm.o \ i915_params.o \ i915_pci.o \ i915_suspend.o \ @@ -110,6 +114,6 @@ i915-y += intel_gvt.o include $(src)/gvt/Makefile endif -obj-$(CONFIG_DRM_I915) += i915.o +obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 1db829c..3c72b3b 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -86,24 +86,25 @@ * general bitmasking mechanism. */ -#define STD_MI_OPCODE_MASK 0xFF800000 -#define STD_3D_OPCODE_MASK 0xFFFF0000 -#define STD_2D_OPCODE_MASK 0xFFC00000 -#define STD_MFX_OPCODE_MASK 0xFFFF0000 +#define STD_MI_OPCODE_SHIFT (32 - 9) +#define STD_3D_OPCODE_SHIFT (32 - 16) +#define STD_2D_OPCODE_SHIFT (32 - 10) +#define STD_MFX_OPCODE_SHIFT (32 - 16) +#define MIN_OPCODE_SHIFT 16 #define CMD(op, opm, f, lm, fl, ...) \ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), (opm) }, \ + .cmd = { (op), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } /* Convenience macros to compress the tables */ -#define SMI STD_MI_OPCODE_MASK -#define S3D STD_3D_OPCODE_MASK -#define S2D STD_2D_OPCODE_MASK -#define SMFX STD_MFX_OPCODE_MASK +#define SMI STD_MI_OPCODE_SHIFT +#define S3D STD_3D_OPCODE_SHIFT +#define S2D STD_2D_OPCODE_SHIFT +#define SMFX STD_MFX_OPCODE_SHIFT #define F true #define S CMD_DESC_SKIP #define R CMD_DESC_REJECT @@ -350,6 +351,9 @@ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +static const struct drm_i915_cmd_descriptor noop_desc = + CMD(MI_NOOP, SMI, F, 1, S); + #undef CMD #undef SMI #undef S3D @@ -458,6 +462,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_GPGPU_DISPATCHDIMX), REG32(GEN7_GPGPU_DISPATCHDIMY), REG32(GEN7_GPGPU_DISPATCHDIMZ), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2), @@ -473,6 +478,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_L3SQCREG1), REG32(GEN7_L3CNTLREG2), REG32(GEN7_L3CNTLREG3), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; static const struct drm_i915_reg_descriptor hsw_render_regs[] = { @@ -502,7 +508,10 @@ static const struct drm_i915_reg_descriptor hsw_render_regs[] = { }; static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; static const struct drm_i915_reg_descriptor ivb_master_regs[] = { @@ -691,12 +700,26 @@ struct cmd_node { * non-opcode bits being set. But if we don't include those bits, some 3D * commands may hash to the same bucket due to not including opcode bits that * make the command unique. 
For now, we will risk hashing to the same bucket. - * - * If we attempt to generate a perfect hash, we should be able to look at bits - * 31:29 of a command from a batch buffer and use the full mask for that - * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this. */ -#define CMD_HASH_MASK STD_MI_OPCODE_MASK +static inline u32 cmd_header_key(u32 x) +{ + u32 shift; + + switch (x >> INSTR_CLIENT_SHIFT) { + default: + case INSTR_MI_CLIENT: + shift = STD_MI_OPCODE_SHIFT; + break; + case INSTR_RC_CLIENT: + shift = STD_3D_OPCODE_SHIFT; + break; + case INSTR_BC_CLIENT: + shift = STD_2D_OPCODE_SHIFT; + break; + } + + return x >> shift; +} static int init_hash_table(struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, @@ -720,7 +743,7 @@ static int init_hash_table(struct intel_engine_cs *engine, desc_node->desc = desc; hash_add(engine->cmd_hash, &desc_node->node, - desc->cmd.value & CMD_HASH_MASK); + cmd_header_key(desc->cmd.value)); } } @@ -746,17 +769,15 @@ static void fini_hash_table(struct intel_engine_cs *engine) * Optionally initializes fields related to batch buffer command parsing in the * struct intel_engine_cs based on whether the platform requires software * command parsing. - * - * Return: non-zero if initialization fails */ -int intel_engine_init_cmd_parser(struct intel_engine_cs *engine) +void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; int ret; if (!IS_GEN7(engine->i915)) - return 0; + return; switch (engine->id) { case RCS: @@ -811,24 +832,27 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine) break; default: MISSING_CASE(engine->id); - BUG(); + return; } - BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)); - BUG_ON(!validate_regs_sorted(engine)); - - WARN_ON(!hash_empty(engine->cmd_hash)); + if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { + DRM_ERROR("%s: command descriptions are not sorted\n", + engine->name); + return; + } + if (!validate_regs_sorted(engine)) { + DRM_ERROR("%s: registers are not sorted\n", engine->name); + return; + } ret = init_hash_table(engine, cmd_tables, cmd_table_count); if (ret) { - DRM_ERROR("CMD: cmd_parser_init failed!\n"); + DRM_ERROR("%s: initialised failed!\n", engine->name); fini_hash_table(engine); - return ret; + return; } engine->needs_cmd_parser = true; - - return 0; } /** @@ -853,12 +877,9 @@ find_cmd_in_table(struct intel_engine_cs *engine, struct cmd_node *desc_node; hash_for_each_possible(engine->cmd_hash, desc_node, node, - cmd_header & CMD_HASH_MASK) { + cmd_header_key(cmd_header)) { const struct drm_i915_cmd_descriptor *desc = desc_node->desc; - u32 masked_cmd = desc->cmd.mask & cmd_header; - u32 masked_value = desc->cmd.value & desc->cmd.mask; - - if (masked_cmd == masked_value) + if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0) return desc; } @@ -876,11 +897,14 @@ find_cmd_in_table(struct intel_engine_cs *engine, static const struct drm_i915_cmd_descriptor* find_cmd(struct intel_engine_cs *engine, u32 cmd_header, + const struct drm_i915_cmd_descriptor *desc, struct drm_i915_cmd_descriptor *default_desc) { - const struct drm_i915_cmd_descriptor *desc; u32 mask; + if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0) + return desc; + desc = find_cmd_in_table(engine, cmd_header); if (desc) return desc; @@ -889,140 +913,127 @@ find_cmd(struct intel_engine_cs *engine, if (!mask) return NULL; - BUG_ON(!default_desc); - default_desc->flags = 
CMD_DESC_SKIP; + default_desc->cmd.value = cmd_header; + default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT; default_desc->length.mask = mask; - + default_desc->flags = CMD_DESC_SKIP; return default_desc; } static const struct drm_i915_reg_descriptor * -find_reg(const struct drm_i915_reg_descriptor *table, - int count, u32 addr) +__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) { - int i; - - for (i = 0; i < count; i++) { - if (i915_mmio_reg_offset(table[i].addr) == addr) - return &table[i]; + int start = 0, end = count; + while (start < end) { + int mid = start + (end - start) / 2; + int ret = addr - i915_mmio_reg_offset(table[mid].addr); + if (ret < 0) + end = mid; + else if (ret > 0) + start = mid + 1; + else + return &table[mid]; } - return NULL; } static const struct drm_i915_reg_descriptor * -find_reg_in_tables(const struct drm_i915_reg_table *tables, - int count, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) { - int i; - const struct drm_i915_reg_table *table; - const struct drm_i915_reg_descriptor *reg; + const struct drm_i915_reg_table *table = engine->reg_tables; + int count = engine->reg_table_count; - for (i = 0; i < count; i++) { - table = &tables[i]; + do { if (!table->master || is_master) { - reg = find_reg(table->regs, table->num_regs, - addr); + const struct drm_i915_reg_descriptor *reg; + + reg = __find_reg(table->regs, table->num_regs, addr); if (reg != NULL) return reg; } - } + } while (table++, --count); return NULL; } -static u32 *vmap_batch(struct drm_i915_gem_object *obj, - unsigned start, unsigned len) -{ - int i; - void *addr = NULL; - struct sg_page_iter sg_iter; - int first_page = start >> PAGE_SHIFT; - int last_page = (len + start + 4095) >> PAGE_SHIFT; - int npages = last_page - first_page; - struct page **pages; - - pages = drm_malloc_ab(npages, sizeof(*pages)); - if (pages == NULL) { - DRM_DEBUG_DRIVER("Failed to get space for pages\n"); - goto finish; - } - - i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) { - pages[i++] = sg_page_iter_page(&sg_iter); - if (i == npages) - break; - } - - addr = vmap(pages, i, 0, PAGE_KERNEL); - if (addr == NULL) { - DRM_DEBUG_DRIVER("Failed to vmap pages\n"); - goto finish; - } - -finish: - if (pages) - drm_free_large(pages); - return (u32*)addr; -} - -/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */ -static u32 *copy_batch(struct drm_i915_gem_object *dest_obj, +/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ +static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, u32 batch_start_offset, - u32 batch_len) + u32 batch_len, + bool *needs_clflush_after) { - int needs_clflush = 0; - void *src_base, *src; - void *dst = NULL; + unsigned int src_needs_clflush; + unsigned int dst_needs_clflush; + void *dst, *src; int ret; - if (batch_len > dest_obj->base.size || - batch_len + batch_start_offset > src_obj->base.size) - return ERR_PTR(-E2BIG); - - if (WARN_ON(dest_obj->pages_pin_count == 0)) - return ERR_PTR(-ENODEV); - - ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush); - if (ret) { - DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n"); + ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush); + if (ret) return ERR_PTR(ret); - } - src_base = vmap_batch(src_obj, batch_start_offset, batch_len); - if (!src_base) { - DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n"); - ret = -ENOMEM; + ret = 
i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush); + if (ret) { + dst = ERR_PTR(ret); goto unpin_src; } - ret = i915_gem_object_set_to_cpu_domain(dest_obj, true); - if (ret) { - DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n"); - goto unmap_src; + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); + if (IS_ERR(dst)) + goto unpin_dst; + + src = ERR_PTR(-ENODEV); + if (src_needs_clflush && + i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) { + src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); + if (!IS_ERR(src)) { + i915_memcpy_from_wc(dst, + src + batch_start_offset, + ALIGN(batch_len, 16)); + i915_gem_object_unpin_map(src_obj); + } } - - dst = vmap_batch(dest_obj, 0, batch_len); - if (!dst) { - DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n"); - ret = -ENOMEM; - goto unmap_src; + if (IS_ERR(src)) { + void *ptr; + int offset, n; + + offset = offset_in_page(batch_start_offset); + + /* We can avoid clflushing partial cachelines before the write + * if we only ever write full cache-lines. Since we know that + * both the source and destination are in multiples of + * PAGE_SIZE, we can simply round up to the next cacheline. + * We don't care about copying too much here as we only + * validate up to the end of the batch. + */ + if (dst_needs_clflush & CLFLUSH_BEFORE) + batch_len = roundup(batch_len, + boot_cpu_data.x86_clflush_size); + + ptr = dst; + for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) { + int len = min_t(int, batch_len, PAGE_SIZE - offset); + + src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); + if (src_needs_clflush) + drm_clflush_virt_range(src + offset, len); + memcpy(ptr, src + offset, len); + kunmap_atomic(src); + + ptr += len; + batch_len -= len; + offset = 0; + } } - src = src_base + offset_in_page(batch_start_offset); - if (needs_clflush) - drm_clflush_virt_range(src, batch_len); - - memcpy(dst, src, batch_len); + /* dst_obj is returned with vmap pinned */ + *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; -unmap_src: - vunmap(src_base); +unpin_dst: + i915_gem_obj_finish_shmem_access(dst_obj); unpin_src: - i915_gem_object_unpin_pages(src_obj); - - return ret ? ERR_PTR(ret) : dst; + i915_gem_obj_finish_shmem_access(src_obj); + return dst; } /** @@ -1052,6 +1063,9 @@ static bool check_cmd(const struct intel_engine_cs *engine, const bool is_master, bool *oacontrol_set) { + if (desc->flags & CMD_DESC_SKIP) + return true; + if (desc->flags & CMD_DESC_REJECT) { DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd); return false; @@ -1076,10 +1090,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg_in_tables(engine->reg_tables, - engine->reg_table_count, - is_master, - reg_addr); + find_reg(engine, is_master, reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n", @@ -1200,16 +1211,19 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, u32 batch_len, bool is_master) { - u32 *cmd, *batch_base, *batch_end; - struct drm_i915_cmd_descriptor default_desc = { 0 }; + u32 *cmd, *batch_end; + struct drm_i915_cmd_descriptor default_desc = noop_desc; + const struct drm_i915_cmd_descriptor *desc = &default_desc; bool oacontrol_set = false; /* OACONTROL tracking. 
See check_cmd() */ + bool needs_clflush_after = false; int ret = 0; - batch_base = copy_batch(shadow_batch_obj, batch_obj, - batch_start_offset, batch_len); - if (IS_ERR(batch_base)) { + cmd = copy_batch(shadow_batch_obj, batch_obj, + batch_start_offset, batch_len, + &needs_clflush_after); + if (IS_ERR(cmd)) { DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n"); - return PTR_ERR(batch_base); + return PTR_ERR(cmd); } /* @@ -1217,17 +1231,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way. */ - batch_end = batch_base + (batch_len / sizeof(*batch_end)); - - cmd = batch_base; + batch_end = cmd + (batch_len / sizeof(*batch_end)); while (cmd < batch_end) { - const struct drm_i915_cmd_descriptor *desc; u32 length; if (*cmd == MI_BATCH_BUFFER_END) break; - desc = find_cmd(engine, *cmd, &default_desc); + desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); @@ -1278,7 +1289,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, ret = -EINVAL; } - vunmap(batch_base); + if (ret == 0 && needs_clflush_after) + drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len); + i915_gem_object_unpin_map(shadow_batch_obj); return ret; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 64e41cf..a95d7bc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,12 +40,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -enum { - ACTIVE_LIST, - INACTIVE_LIST, - PINNED_LIST, -}; - /* As the drm_debugfs_init() routines are called before dev->dev_private is * allocated we need to hook into the minor for release. */ static int @@ -111,7 +105,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj) static char get_global_flag(struct drm_i915_gem_object *obj) { - return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; + return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' '; } static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) @@ -158,11 +152,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, "%x ", i915_gem_active_get_seqno(&obj->last_read[id], &obj->base.dev->struct_mutex)); - seq_printf(m, "] %x %x%s%s%s", + seq_printf(m, "] %x %s%s%s", i915_gem_active_get_seqno(&obj->last_write, &obj->base.dev->struct_mutex), - i915_gem_active_get_seqno(&obj->last_fence, - &obj->base.dev->struct_mutex), i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); @@ -175,8 +167,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", pin_count); if (obj->pin_display) seq_printf(m, " (display)"); - if (obj->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", obj->fence_reg); list_for_each_entry(vma, &obj->vma_list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -186,6 +176,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) vma->node.start, vma->node.size); if (i915_vma_is_ggtt(vma)) seq_printf(m, ", type: %u", vma->ggtt_view.type); + if (vma->fence) + seq_printf(m, " , fence: %d%s", + vma->fence->id, + i915_gem_active_isset(&vma->last_fence) ? 
"*" : ""); seq_puts(m, ")"); } if (obj->stolen) @@ -210,53 +204,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); } -static int i915_gem_object_list_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = m->private; - uintptr_t list = (uintptr_t) node->info_ent->data; - struct list_head *head; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_vma *vma; - u64 total_obj_size, total_gtt_size; - int count, ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - /* FIXME: the user of this interface might want more than just GGTT */ - switch (list) { - case ACTIVE_LIST: - seq_puts(m, "Active:\n"); - head = &ggtt->base.active_list; - break; - case INACTIVE_LIST: - seq_puts(m, "Inactive:\n"); - head = &ggtt->base.inactive_list; - break; - default: - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(vma, head, vm_link) { - seq_printf(m, " "); - describe_obj(m, vma->obj); - seq_printf(m, "\n"); - total_obj_size += vma->obj->base.size; - total_gtt_size += vma->node.size; - count++; - } - mutex_unlock(&dev->struct_mutex); - - seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", - count, total_obj_size, total_gtt_size); - return 0; -} - static int obj_rank_by_stolen(void *priv, struct list_head *A, struct list_head *B) { @@ -322,17 +269,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) return 0; } -#define count_objects(list, member) do { \ - list_for_each_entry(obj, list, member) { \ - size += i915_gem_obj_total_ggtt_size(obj); \ - ++count; \ - if (obj->map_and_fenceable) { \ - mappable_size += i915_gem_obj_ggtt_size(obj); \ - ++mappable_count; \ - } \ - } \ -} while (0) - struct file_stats { struct drm_i915_file_private *file_priv; unsigned long count; @@ -418,9 +354,9 @@ static int per_file_ctx_stats(int id, void *ptr, void *data) for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { if (ctx->engine[n].state) - per_file_stats(0, ctx->engine[n].state, data); + per_file_stats(0, ctx->engine[n].state->obj, data); if (ctx->engine[n].ring) - per_file_stats(0, ctx->engine[n].ring->obj, data); + per_file_stats(0, ctx->engine[n].ring->vma->obj, data); } return 0; @@ -447,30 +383,16 @@ static void print_context_stats(struct seq_file *m, print_file_stats(m, "[k]contexts", stats); } -#define count_vmas(list, member) do { \ - list_for_each_entry(vma, list, member) { \ - size += i915_gem_obj_total_ggtt_size(vma->obj); \ - ++count; \ - if (vma->obj->map_and_fenceable) { \ - mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ - ++mappable_count; \ - } \ - } \ -} while (0) - static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - u32 count, mappable_count, purgeable_count; - u64 size, mappable_size, purgeable_size; - unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0; - u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0; + u32 count, mapped_count, purgeable_count, dpy_count; + u64 size, mapped_size, purgeable_size, dpy_size; struct drm_i915_gem_object *obj; struct drm_file *file; - struct i915_vma *vma; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -481,70 
+403,53 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->mm.object_count, dev_priv->mm.object_memory); - size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.bound_list, global_list); - seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", - count, mappable_count, size, mappable_size); - - size = count = mappable_size = mappable_count = 0; - count_vmas(&ggtt->base.active_list, vm_link); - seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", - count, mappable_count, size, mappable_size); + size = count = 0; + mapped_size = mapped_count = 0; + purgeable_size = purgeable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + size += obj->base.size; + ++count; - size = count = mappable_size = mappable_count = 0; - count_vmas(&ggtt->base.inactive_list, vm_link); - seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", - count, mappable_count, size, mappable_size); + if (obj->madv == I915_MADV_DONTNEED) { + purgeable_size += obj->base.size; + ++purgeable_count; + } - size = count = purgeable_size = purgeable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { - size += obj->base.size, ++count; - if (obj->madv == I915_MADV_DONTNEED) - purgeable_size += obj->base.size, ++purgeable_count; if (obj->mapping) { - pin_mapped_count++; - pin_mapped_size += obj->base.size; - if (obj->pages_pin_count == 0) { - pin_mapped_purgeable_count++; - pin_mapped_purgeable_size += obj->base.size; - } + mapped_count++; + mapped_size += obj->base.size; } } seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); - size = count = mappable_size = mappable_count = 0; + size = count = dpy_size = dpy_count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (obj->fault_mappable) { - size += i915_gem_obj_ggtt_size(obj); - ++count; - } + size += obj->base.size; + ++count; + if (obj->pin_display) { - mappable_size += i915_gem_obj_ggtt_size(obj); - ++mappable_count; + dpy_size += obj->base.size; + ++dpy_count; } + if (obj->madv == I915_MADV_DONTNEED) { purgeable_size += obj->base.size; ++purgeable_count; } + if (obj->mapping) { - pin_mapped_count++; - pin_mapped_size += obj->base.size; - if (obj->pages_pin_count == 0) { - pin_mapped_purgeable_count++; - pin_mapped_purgeable_size += obj->base.size; - } + mapped_count++; + mapped_size += obj->base.size; } } + seq_printf(m, "%u bound objects, %llu bytes\n", + count, size); seq_printf(m, "%u purgeable objects, %llu bytes\n", purgeable_count, purgeable_size); - seq_printf(m, "%u pinned mappable objects, %llu bytes\n", - mappable_count, mappable_size); - seq_printf(m, "%u fault mappable objects, %llu bytes\n", - count, size); - seq_printf(m, - "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n", - pin_mapped_count, pin_mapped_purgeable_count, - pin_mapped_size, pin_mapped_purgeable_size); + seq_printf(m, "%u mapped objects, %llu bytes\n", + mapped_count, mapped_size); + seq_printf(m, "%u display objects (pinned), %llu bytes\n", + dpy_count, dpy_size); seq_printf(m, "%llu [%llu] gtt total\n", ggtt->base.total, ggtt->mappable_end - ggtt->base.start); @@ -557,6 +462,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) print_context_stats(m, dev_priv); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_request *request; struct task_struct *task; memset(&stats, 0, sizeof(stats)); 
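/* Editor's sketch of the reworked accounting above: one pass per object
 * list, with each object added to every bucket it matches, replacing the
 * old per-category count_objects()/count_vmas() walks. Field names follow
 * the patch; the struct and helper are illustrative only. */
struct counts { u64 size, mapped, purgeable, dpy; };

static void account_obj(struct counts *c,
			const struct drm_i915_gem_object *obj)
{
	c->size += obj->base.size;
	if (obj->mapping)			/* has a kernel vmap */
		c->mapped += obj->base.size;
	if (obj->madv == I915_MADV_DONTNEED)	/* marked purgeable */
		c->purgeable += obj->base.size;
	if (obj->pin_display)			/* pinned for scanout */
		c->dpy += obj->base.size;
}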
@@ -570,10 +477,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data) * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ + mutex_lock(&dev->struct_mutex); + request = list_first_entry_or_null(&file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); + task = pid_task(request && request->ctx->pid ? + request->ctx->pid : file->pid, + PIDTYPE_PID); print_file_stats(m, task ? task->comm : "<unknown>", stats); rcu_read_unlock(); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->filelist_mutex); @@ -584,8 +498,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - uintptr_t list = (uintptr_t) node->info_ent->data; struct drm_i915_private *dev_priv = to_i915(dev); + bool show_pin_display_only = !!data; struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; int count, ret; @@ -596,7 +510,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) + if (show_pin_display_only && !obj->pin_display) continue; seq_puts(m, " "); @@ -755,12 +669,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data) seq_printf(m, "%s requests: %d\n", engine->name, count); list_for_each_entry(req, &engine->request_list, link) { + struct pid *pid = req->ctx->pid; struct task_struct *task; rcu_read_lock(); - task = NULL; - if (req->pid) - task = pid_task(req->pid, PIDTYPE_PID); + task = pid ? pid_task(pid, PIDTYPE_PID) : NULL; seq_printf(m, " %x @ %d: %s [%d]\n", req->fence.seqno, (int) (jiffies - req->emitted_jiffies), @@ -787,8 +700,6 @@ static void i915_ring_seqno_info(struct seq_file *m, seq_printf(m, "Current sequence (%s): %x\n", engine->name, intel_engine_get_seqno(engine)); - seq_printf(m, "Current user interrupts (%s): %lx\n", - engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups)); spin_lock(&b->lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { @@ -1027,14 +938,14 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; + struct i915_vma *vma = dev_priv->fence_regs[i].vma; seq_printf(m, "Fence %d, pin count = %d, object = ", i, dev_priv->fence_regs[i].pin_count); - if (obj == NULL) + if (!vma) seq_puts(m, "unused"); else - describe_obj(m, obj); + describe_obj(m, vma->obj); seq_putc(m, '\n'); } @@ -1434,11 +1345,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) engine->hangcheck.seqno, seqno[id], engine->last_submitted_seqno); - seq_printf(m, "\twaiters? %d\n", - intel_engine_has_waiter(engine)); - seq_printf(m, "\tuser interrupts = %lx [current %lx]\n", - engine->hangcheck.user_interrupts, - READ_ONCE(engine->breadcrumbs.irq_wakeups)); + seq_printf(m, "\twaiters? %s, fake irq active? 
%s\n", + yesno(intel_engine_has_waiter(engine)), + yesno(test_bit(engine->id, + &dev_priv->gpu_error.missed_irq_rings))); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); @@ -2052,18 +1962,17 @@ static int i915_context_status(struct seq_file *m, void *unused) list_for_each_entry(ctx, &dev_priv->context_list, link) { seq_printf(m, "HW context %u ", ctx->hw_id); - if (IS_ERR(ctx->file_priv)) { - seq_puts(m, "(deleted) "); - } else if (ctx->file_priv) { - struct pid *pid = ctx->file_priv->file->pid; + if (ctx->pid) { struct task_struct *task; - task = get_pid_task(pid, PIDTYPE_PID); + task = get_pid_task(ctx->pid, PIDTYPE_PID); if (task) { seq_printf(m, "(%s [%d]) ", task->comm, task->pid); put_task_struct(task); } + } else if (IS_ERR(ctx->file_priv)) { + seq_puts(m, "(deleted) "); } else { seq_puts(m, "(kernel) "); } @@ -2077,7 +1986,7 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_printf(m, "%s: ", engine->name); seq_putc(m, ce->initialised ? 'I' : 'i'); if (ce->state) - describe_obj(m, ce->state); + describe_obj(m, ce->state->obj); if (ce->ring) describe_ctx_ring(m, ce->ring); seq_putc(m, '\n'); @@ -2095,36 +2004,34 @@ static void i915_dump_lrc_obj(struct seq_file *m, struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + struct i915_vma *vma = ctx->engine[engine->id].state; struct page *page; - uint32_t *reg_state; int j; - unsigned long ggtt_offset = 0; seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); - if (ctx_obj == NULL) { - seq_puts(m, "\tNot allocated\n"); + if (!vma) { + seq_puts(m, "\tFake context\n"); return; } - if (!i915_gem_obj_ggtt_bound(ctx_obj)) - seq_puts(m, "\tNot bound in GGTT\n"); - else - ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); + if (vma->flags & I915_VMA_GLOBAL_BIND) + seq_printf(m, "\tBound in GGTT at 0x%08x\n", + i915_ggtt_offset(vma)); - if (i915_gem_object_get_pages(ctx_obj)) { - seq_puts(m, "\tFailed to get pages for context object\n"); + if (i915_gem_object_get_pages(vma->obj)) { + seq_puts(m, "\tFailed to get pages for context object\n\n"); return; } - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - if (!WARN_ON(page == NULL)) { - reg_state = kmap_atomic(page); + page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); + if (page) { + u32 *reg_state = kmap_atomic(page); for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { - seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", - ggtt_offset + 4096 + (j * 4), + seq_printf(m, + "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", + j * 4, reg_state[j], reg_state[j + 1], reg_state[j + 2], reg_state[j + 3]); } @@ -2444,6 +2351,20 @@ static int count_irq_waiters(struct drm_i915_private *i915) return count; } +static const char *rps_power_to_str(unsigned int power) +{ + static const char * const strings[] = { + [LOW_POWER] = "low power", + [BETWEEN] = "mixed", + [HIGH_POWER] = "high power", + }; + + if (power >= ARRAY_SIZE(strings) || !strings[power]) + return "unknown"; + + return strings[power]; +} + static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -2455,12 +2376,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "GPU busy? %s [%x]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_engines); seq_printf(m, "CPU waiting? 
%d\n", count_irq_waiters(dev_priv)); - seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", - intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + seq_printf(m, "Frequency requested %d\n", + intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); + seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); + seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), + intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); mutex_lock(&dev->filelist_mutex); spin_lock(&dev_priv->rps.client_lock); @@ -2481,6 +2407,31 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) spin_unlock(&dev_priv->rps.client_lock); mutex_unlock(&dev->filelist_mutex); + if (INTEL_GEN(dev_priv) >= 6 && + dev_priv->rps.enabled && + dev_priv->gt.active_engines) { + u32 rpup, rpupei; + u32 rpdown, rpdownei; + + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; + rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; + rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; + rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + + seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", + rps_power_to_str(dev_priv->rps.power)); + seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", + 100 * rpup / rpupei, + dev_priv->rps.up_threshold); + seq_printf(m, " Avg. down: %d%% [below threshold? 
%d%%]\n", + 100 * rpdown / rpdownei, + dev_priv->rps.down_threshold); + } else { + seq_puts(m, "\nRPS Autotuning inactive\n"); + } + return 0; } @@ -2547,6 +2498,7 @@ static void i915_guc_client_info(struct seq_file *m, struct i915_guc_client *client) { struct intel_engine_cs *engine; + enum intel_engine_id id; uint64_t tot = 0; seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", @@ -2557,15 +2509,14 @@ static void i915_guc_client_info(struct seq_file *m, client->wq_size, client->wq_offset, client->wq_tail); seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); - seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); - for_each_engine(engine, dev_priv) { + for_each_engine_id(engine, dev_priv, id) { + u64 submissions = client->submissions[id]; + tot += submissions; seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[engine->id], - engine->name); - tot += client->submissions[engine->id]; + submissions, engine->name); } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2578,6 +2529,7 @@ static int i915_guc_info(struct seq_file *m, void *data) struct intel_guc guc; struct i915_guc_client client = {}; struct intel_engine_cs *engine; + enum intel_engine_id id; u64 total = 0; if (!HAS_GUC_SCHED(dev_priv)) @@ -2604,11 +2556,11 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "GuC last action error code: %d\n", guc.action_err); seq_printf(m, "\nGuC submissions:\n"); - for_each_engine(engine, dev_priv) { + for_each_engine_id(engine, dev_priv, id) { + u64 submissions = guc.submissions[id]; + total += submissions; seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - engine->name, guc.submissions[engine->id], - guc.last_seqno[engine->id]); - total += guc.submissions[engine->id]; + engine->name, submissions, guc.last_seqno[id]); } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2625,15 +2577,15 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; - u32 *log; + struct drm_i915_gem_object *obj; int i = 0, pg; - if (!log_obj) + if (!dev_priv->guc.log_vma) return 0; - for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { - log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); + obj = dev_priv->guc.log_vma->obj; + for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) { + u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg)); for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", @@ -3237,7 +3189,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + int num_rings = INTEL_INFO(dev)->num_rings; enum intel_engine_id id; int j, ret; @@ -3255,7 +3207,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) struct page *page; uint64_t *seqno; - page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); + page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); seqno = (uint64_t *)kmap_atomic(page); for_each_engine_id(engine, dev_priv, id) { @@ -5386,9 +5338,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", 
i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_gtt", i915_gem_gtt_info, 0}, - {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, - {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, - {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1}, {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 57eb380..13ae340 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -827,6 +827,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); + i915_memcpy_init_early(dev_priv); + ret = i915_workqueues_init(dev_priv); if (ret < 0) return ret; @@ -1560,6 +1562,7 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev); i915_restore_state(dev); + intel_pps_unlock_regs_wa(dev_priv); intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c36d176..e6069057 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -70,7 +70,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160808" +#define DRIVER_DATE "20160822" #undef WARN_ON /* Many gcc seem to not see through this and fall over :( */ @@ -455,15 +455,21 @@ struct intel_opregion { struct intel_overlay; struct intel_overlay_error_state; -#define I915_FENCE_REG_NONE -1 -#define I915_MAX_NUM_FENCES 32 -/* 32 fences + sign bit for FENCE_REG_NONE */ -#define I915_MAX_NUM_FENCE_BITS 6 - struct drm_i915_fence_reg { - struct list_head lru_list; - struct drm_i915_gem_object *obj; + struct list_head link; + struct drm_i915_private *i915; + struct i915_vma *vma; int pin_count; + int id; + /** + * Whether the tiling parameters for the currently + * associated fence register have changed. Note that + * for the purposes of tracking tiling changes we also + * treat the unfenced register, the register slot that + * the object occupies whilst it executes a fenced + * command (such as BLT on gen2/3), as a "fence". 
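/* Editor's note: fence tracking moves off the object (the old fence_reg
 * field) and onto the vma; a fence register now points at the vma it
 * guards. Illustrative helper (ours, not the patch's) showing the new
 * indirection used e.g. by i915_gem_fence_regs_info() above: */
static struct drm_i915_gem_object *
fence_to_obj(const struct drm_i915_fence_reg *fence)
{
	return fence->vma ? fence->vma->obj : NULL;	/* NULL when unused */
}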
+ */ + bool dirty; }; struct sdvo_device_mapping { @@ -475,130 +481,6 @@ struct sdvo_device_mapping { u8 ddc_pin; }; -struct intel_display_error_state; - -struct drm_i915_error_state { - struct kref ref; - struct timeval time; - - char error_msg[128]; - bool simulated; - int iommu; - u32 reset_count; - u32 suspend_count; - - /* Generic register state */ - u32 eir; - u32 pgtbl_er; - u32 ier; - u32 gtier[4]; - u32 ccid; - u32 derrmr; - u32 forcewake; - u32 error; /* gen6+ */ - u32 err_int; /* gen7 */ - u32 fault_data0; /* gen8, gen9 */ - u32 fault_data1; /* gen8, gen9 */ - u32 done_reg; - u32 gac_eco; - u32 gam_ecochk; - u32 gab_ctl; - u32 gfx_mode; - u32 extra_instdone[I915_NUM_INSTDONE_REG]; - u64 fence[I915_MAX_NUM_FENCES]; - struct intel_overlay_error_state *overlay; - struct intel_display_error_state *display; - struct drm_i915_error_object *semaphore_obj; - - struct drm_i915_error_engine { - int engine_id; - /* Software tracked state */ - bool waiting; - int num_waiters; - int hangcheck_score; - enum intel_engine_hangcheck_action hangcheck_action; - int num_requests; - - /* our own tracking of ring head and tail */ - u32 cpu_ring_head; - u32 cpu_ring_tail; - - u32 last_seqno; - u32 semaphore_seqno[I915_NUM_ENGINES - 1]; - - /* Register state */ - u32 start; - u32 tail; - u32 head; - u32 ctl; - u32 hws; - u32 ipeir; - u32 ipehr; - u32 instdone; - u32 bbstate; - u32 instpm; - u32 instps; - u32 seqno; - u64 bbaddr; - u64 acthd; - u32 fault_reg; - u64 faddr; - u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; - - struct drm_i915_error_object { - int page_count; - u64 gtt_offset; - u32 *pages[0]; - } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; - - struct drm_i915_error_object *wa_ctx; - - struct drm_i915_error_request { - long jiffies; - u32 seqno; - u32 tail; - } *requests; - - struct drm_i915_error_waiter { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 seqno; - } *waiters; - - struct { - u32 gfx_mode; - union { - u64 pdp[4]; - u32 pp_dir_base; - }; - } vm_info; - - pid_t pid; - char comm[TASK_COMM_LEN]; - } engine[I915_NUM_ENGINES]; - - struct drm_i915_error_buffer { - u32 size; - u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; - u64 gtt_offset; - u32 read_domains; - u32 write_domain; - s32 fence_reg:I915_MAX_NUM_FENCE_BITS; - s32 pinned:2; - u32 tiling:2; - u32 dirty:1; - u32 purgeable:1; - u32 userptr:1; - s32 engine:4; - u32 cache_level:3; - } **active_bo, **pinned_bo; - - u32 *active_bo_count, *pinned_bo_count; - u32 vm_count; -}; - struct intel_connector; struct intel_encoder; struct intel_crtc_state; @@ -793,6 +675,7 @@ struct intel_device_info { u8 gen; u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ + u8 num_rings; DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; @@ -822,6 +705,134 @@ struct intel_device_info { #undef DEFINE_FLAG #undef SEP_SEMICOLON +struct intel_display_error_state; + +struct drm_i915_error_state { + struct kref ref; + struct timeval time; + + char error_msg[128]; + bool simulated; + int iommu; + u32 reset_count; + u32 suspend_count; + struct intel_device_info device_info; + + /* Generic register state */ + u32 eir; + u32 pgtbl_er; + u32 ier; + u32 gtier[4]; + u32 ccid; + u32 derrmr; + u32 forcewake; + u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ + u32 done_reg; + u32 gac_eco; + u32 gam_ecochk; + u32 gab_ctl; + u32 
gfx_mode; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u64 fence[I915_MAX_NUM_FENCES]; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; + struct drm_i915_error_object *semaphore; + + struct drm_i915_error_engine { + int engine_id; + /* Software tracked state */ + bool waiting; + int num_waiters; + int hangcheck_score; + enum intel_engine_hangcheck_action hangcheck_action; + struct i915_address_space *vm; + int num_requests; + + /* our own tracking of ring head and tail */ + u32 cpu_ring_head; + u32 cpu_ring_tail; + + u32 last_seqno; + u32 semaphore_seqno[I915_NUM_ENGINES - 1]; + + /* Register state */ + u32 start; + u32 tail; + u32 head; + u32 ctl; + u32 mode; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 instdone; + u32 bbstate; + u32 instpm; + u32 instps; + u32 seqno; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u64 faddr; + u32 rc_psmi; /* sleep state */ + u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; + + struct drm_i915_error_object { + int page_count; + u64 gtt_offset; + u64 gtt_size; + u32 *pages[0]; + } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; + + struct drm_i915_error_object *wa_ctx; + + struct drm_i915_error_request { + long jiffies; + pid_t pid; + u32 seqno; + u32 head; + u32 tail; + } *requests; + + struct drm_i915_error_waiter { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 seqno; + } *waiters; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + + pid_t pid; + char comm[TASK_COMM_LEN]; + } engine[I915_NUM_ENGINES]; + + struct drm_i915_error_buffer { + u32 size; + u32 name; + u32 rseqno[I915_NUM_ENGINES], wseqno; + u64 gtt_offset; + u32 read_domains; + u32 write_domain; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + u32 userptr:1; + s32 engine:4; + u32 cache_level:3; + } *active_bo[I915_NUM_ENGINES], *pinned_bo; + u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; + struct i915_address_space *active_vm[I915_NUM_ENGINES]; +}; + enum i915_cache_level { I915_CACHE_NONE = 0, I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ @@ -878,22 +889,23 @@ struct i915_gem_context { struct drm_i915_private *i915; struct drm_i915_file_private *file_priv; struct i915_hw_ppgtt *ppgtt; + struct pid *pid; struct i915_ctx_hang_stats hang_stats; - /* Unique identifier for this context, used by the hw for tracking */ unsigned long flags; #define CONTEXT_NO_ZEROMAP BIT(0) #define CONTEXT_NO_ERROR_CAPTURE BIT(1) - unsigned hw_id; + + /* Unique identifier for this context, used by the hw for tracking */ + unsigned int hw_id; u32 user_handle; u32 ggtt_alignment; struct intel_context { - struct drm_i915_gem_object *state; + struct i915_vma *state; struct intel_ring *ring; - struct i915_vma *lrc_vma; uint32_t *lrc_reg_state; u64 lrc_desc; int pin_count; @@ -1061,13 +1073,6 @@ struct intel_gmbus { struct i915_suspend_saved_registers { u32 saveDSPARB; - u32 saveLVDS; - u32 savePP_ON_DELAYS; - u32 savePP_OFF_DELAYS; - u32 savePP_ON; - u32 savePP_OFF; - u32 savePP_CONTROL; - u32 savePP_DIVISOR; u32 saveFBC_CONTROL; u32 saveCACHE_MODE_0; u32 saveMI_ARB_STATE; @@ -1749,12 +1754,14 @@ struct drm_i915_private { uint32_t psr_mmio_base; + uint32_t pps_mmio_base; + wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; struct i915_gem_context *kernel_context; struct intel_engine_cs engine[I915_NUM_ENGINES]; - struct drm_i915_gem_object *semaphore_obj; + struct i915_vma *semaphore; u32 next_seqno; struct drm_dma_handle *status_page_dmah; @@ -1840,6 
+1847,7 @@ struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; struct drm_atomic_state *modeset_restore_state; + struct drm_modeset_acquire_ctx reset_ctx; struct list_head vm_list; /* Global list of all address spaces */ struct i915_ggtt ggtt; /* VM representing the global address space */ @@ -2171,33 +2179,11 @@ struct drm_i915_gem_object { unsigned int dirty:1; /** - * Fence register bits (if any) for this object. Will be set - * as needed when mapped into the GTT. - * Protected by dev->struct_mutex. - */ - signed int fence_reg:I915_MAX_NUM_FENCE_BITS; - - /** * Advice: are the backing pages purgeable? */ unsigned int madv:2; /** - * Whether the tiling parameters for the currently associated fence - * register have changed. Note that for the purposes of tracking - * tiling changes we also treat the unfenced register, the register - * slot that the object occupies whilst it executes a fenced - * command (such as BLT on gen2/3), as a "fence". - */ - unsigned int fence_dirty:1; - - /** - * Is the object at the current location in the gtt mappable and - * fenceable? Used to avoid costly recalculations. - */ - unsigned int map_and_fenceable:1; - - /** * Whether the current gtt mapping needs to be mappable (and isn't just * mappable by accident). Track pin and fault separate for a more * accurate mappable working set. @@ -2213,6 +2199,7 @@ struct drm_i915_gem_object { unsigned int cache_dirty:1; atomic_t frontbuffer_bits; + unsigned int frontbuffer_ggtt_origin; /* write once */ /** Current tiling stride for the object, if it's tiled. */ unsigned int tiling_and_stride; @@ -2220,7 +2207,6 @@ struct drm_i915_gem_object { #define TILING_MASK (FENCE_MINIMUM_STRIDE-1) #define STRIDE_MASK (~TILING_MASK) - unsigned int has_wc_mmap; /** Count of VMA actually bound by this object */ unsigned int bind_count; unsigned int pin_display; @@ -2245,7 +2231,6 @@ struct drm_i915_gem_object { */ struct i915_gem_active last_read[I915_NUM_ENGINES]; struct i915_gem_active last_write; - struct i915_gem_active last_fence; /** References from framebuffers, locks out tiling changes. 
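/* Editor's sketch: the single tiling_and_stride word above packs both
 * values. Assuming FENCE_MINIMUM_STRIDE is 128 as elsewhere in i915, a
 * fenced stride is always 128-byte aligned, so its low 7 bits are free to
 * carry the tiling mode. The pack helper is ours; the masks mirror the
 * defines in the patch. */
#define FENCE_MINIMUM_STRIDE 128
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

static unsigned int pack_tiling_and_stride(unsigned int tiling,
					   unsigned int stride)
{
	/* assumes stride is already 128-byte aligned and tiling < 128 */
	return (stride & STRIDE_MASK) | (tiling & TILING_MASK);
}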
*/ unsigned long framebuffer_references; @@ -2375,6 +2360,18 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj) return obj->tiling_and_stride & STRIDE_MASK; } +static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) +{ + i915_gem_object_get(vma->obj); + return vma; +} + +static inline void i915_vma_put(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + i915_gem_object_put(vma->obj); +} + /* * Optimised SGL iterator for GEM objects */ @@ -3066,7 +3063,7 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data( void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); -int __must_check +struct i915_vma * __must_check i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, u64 size, @@ -3085,9 +3082,6 @@ int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); -int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - int *needs_clflush); - int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline int __sg_page_count(struct scatterlist *sg) @@ -3147,13 +3141,20 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) obj->pages_pin_count--; } +enum i915_map_type { + I915_MAP_WB = 0, + I915_MAP_WC, +}; + /** * i915_gem_object_pin_map - return a contiguous mapping of the entire object * @obj - the object to map into kernel address space + * @type - the type of mapping, used to select pgprot_t * * Calls i915_gem_object_pin_pages() to prevent reaping of the object's * pages and then returns a contiguous mapping of the backing storage into - * the kernel address space. + * the kernel address space. Based on the @type of mapping, the PTE will be + * set to either WriteBack or WriteCombine (via pgprot_t). * * The caller must hold the struct_mutex, and is responsible for calling * i915_gem_object_unpin_map() when the mapping is no longer required. @@ -3161,7 +3162,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) * Returns the pointer through which to access the mapped object, or an * ERR_PTR() on error. 
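/* Editor's usage sketch for the new two-argument pin_map; the helper name,
 * data and len are placeholders (struct_mutex assumed held, len within the
 * object size): */
static int copy_into_obj(struct drm_i915_gem_object *obj,
			 const void *data, size_t len)
{
	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	memcpy(vaddr, data, len);	/* coherent CPU access via WB map */
	i915_gem_object_unpin_map(obj);	/* drop the map and page pin */
	return 0;
}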
*/ -void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); +void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type); /** * i915_gem_object_unpin_map - releases an earlier mapping @@ -3180,6 +3182,20 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +#define CLFLUSH_BEFORE 0x1 +#define CLFLUSH_AFTER 0x2 +#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) + +static inline void +i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct drm_i915_gem_request *to); @@ -3262,12 +3278,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); -int __must_check +struct i915_vma * __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view); -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); +void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); int i915_gem_open(struct drm_device *dev, struct drm_file *file); @@ -3287,71 +3302,81 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view); -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm); -static inline u64 -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) -{ - return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); -} - -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view); -bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm); - struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); -struct i915_vma * -i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); + struct i915_address_space *vm, + const struct i915_ggtt_view *view); struct i915_vma * i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); -struct i915_vma * -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); - -static inline struct i915_vma * -i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) -{ - return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); -} -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); + struct i915_address_space *vm, + const struct i915_ggtt_view *view); -/* Some GGTT VM helpers */ static inline struct i915_hw_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { return container_of(vm, struct i915_hw_ppgtt, base); } -static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) +static inline struct 
i915_vma * +i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) { - return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); + return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); } -unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj); - -void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); -static inline void -i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +static inline unsigned long +i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { - i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); + return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); } /* i915_gem_fence.c */ -int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); -int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); +int __must_check i915_vma_get_fence(struct i915_vma *vma); +int __must_check i915_vma_put_fence(struct i915_vma *vma); + +/** + * i915_vma_pin_fence - pin fencing state + * @vma: vma to pin fencing for + * + * This pins the fencing state (whether tiled or untiled) to make sure the + * vma (and its object) is ready to be used as a scanout target. Fencing + * status must be synchronized first by calling i915_vma_get_fence(): + * + * The resulting fence pin reference must be released again with + * i915_vma_unpin_fence(). + * + * Returns: + * + * True if the vma has a fence, false otherwise. + */ +static inline bool +i915_vma_pin_fence(struct i915_vma *vma) +{ + if (vma->fence) { + vma->fence->pin_count++; + return true; + } else + return false; +} -bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); -void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); +/** + * i915_vma_unpin_fence - unpin fencing state + * @vma: vma to unpin fencing for + * + * This releases the fence pin reference acquired through + * i915_vma_pin_fence. It will handle both objects with and without an + * attached fence correctly; callers do not need to distinguish this. + */ +static inline void +i915_vma_unpin_fence(struct i915_vma *vma) +{ + if (vma->fence) { + GEM_BUG_ON(vma->fence->pin_count <= 0); + vma->fence->pin_count--; + } +} void i915_gem_restore_fences(struct drm_device *dev); @@ -3429,6 +3454,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { + wmb(); if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3516,7 +3542,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, @@ -3848,7 +3874,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) * is woken. 
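/* Editor's sketch of the intended calling pattern for the fence helpers
 * above (the wrapper name is ours; vma assumed bound in the GGTT; error
 * handling trimmed): synchronize the fence state first, then pin it across
 * the access window. */
static int use_fenced_vma(struct i915_vma *vma)
{
	int ret = i915_vma_get_fence(vma);	/* allocate/update fence reg */

	if (ret)
		return ret;
	if (i915_vma_pin_fence(vma)) {		/* true => fence attached */
		/* ... fenced GTT access, e.g. detiled readback ... */
		i915_vma_unpin_fence(vma);
	}
	return 0;
}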
*/ if (engine->irq_seqno_barrier && - READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current && + rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current && cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { struct task_struct *tsk; @@ -3873,7 +3899,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) * irq_posted == false but we are still running). */ rcu_read_lock(); - tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh); if (tsk && tsk != current) /* Note that if the bottom-half is changed as we * are sending the wake-up, the new bottom-half will @@ -3902,4 +3928,32 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) return false; } +void i915_memcpy_init_early(struct drm_i915_private *dev_priv); +bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); + +/* i915_mm.c */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); + +#define ptr_mask_bits(ptr) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (typeof(ptr))(__v & PAGE_MASK); \ +}) + +#define ptr_unpack_bits(ptr, bits) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (bits) = __v & ~PAGE_MASK; \ + (typeof(ptr))(__v & PAGE_MASK); \ +}) + +#define ptr_pack_bits(ptr, bits) \ + ((typeof(ptr))((unsigned long)(ptr) | (bits))) + +#define fetch_and_zero(ptr) ({ \ + typeof(*ptr) __T = *(ptr); \ + *(ptr) = (typeof(*ptr))0; \ + __T; \ +}) + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f4f8eaa..04607d4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -279,16 +279,25 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .release = i915_gem_object_release_phys, }; -int -i915_gem_object_unbind(struct drm_i915_gem_object *obj) +int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { struct i915_vma *vma; LIST_HEAD(still_in_list); int ret; - /* The vma will only be freed if it is marked as closed, and if we wait - * upon rendering to the vma, we may unbind anything in the list. + lockdep_assert_held(&obj->base.dev->struct_mutex); + + /* Closed vma are removed from the obj->vma_list - but they may + * still have an active binding on the object. To remove those we + * must wait for all rendering to complete to the object (as unbinding + * must anyway), and retire the requests. */ + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + + i915_gem_retire_requests(to_i915(obj->base.dev)); + while ((vma = list_first_entry_or_null(&obj->vma_list, struct i915_vma, obj_link))) { @@ -600,34 +609,106 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, * flush the object from the CPU cache. */ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - int *needs_clflush) + unsigned int *needs_clflush) { int ret; *needs_clflush = 0; - if (WARN_ON(!i915_gem_object_has_struct_page(obj))) - return -EINVAL; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { - /* If we're not in the cpu read domain, set ourself into the gtt - * read domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will dirty the data - * anyway again before the next pread happens. 
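/* Editor's illustration of the new pointer-packing helpers above: spare low
 * bits of a page-aligned pointer carry a small tag (the demo function and
 * its page_ptr argument are hypothetical). */
static void ptr_bits_demo(void *page_ptr)	/* must be page-aligned */
{
	unsigned long tag;
	void *p = ptr_pack_bits(page_ptr, 3);	/* stash tag 3 in low bits */
	void *clean = ptr_unpack_bits(p, tag);	/* clean==page_ptr, tag==3 */
	void *taken = fetch_and_zero(&p);	/* old value; p becomes NULL */

	(void)clean; (void)taken;
}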
*/ + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. + */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, obj->cache_level); + + if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, false); + if (ret) + goto err_unpin; + + *needs_clflush = 0; } + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; +} + +int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush) +{ + int ret; + + *needs_clflush = 0; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + ret = i915_gem_object_get_pages(obj); if (ret) return ret; i915_gem_object_pin_pages(obj); + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we're not in the cpu write domain, set ourself into the + * gtt write domain and manually flush cachelines (as required). + * This optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. + */ + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) + *needs_clflush |= cpu_write_needs_clflush(obj) << 1; + + /* Same trick applies to invalidate partially written cachelines read + * before writing. + */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) + *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev, + obj->cache_level); + + if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) + goto err_unpin; + + *needs_clflush = 0; + } + + if ((*needs_clflush & CLFLUSH_AFTER) == 0) + obj->cache_dirty = true; + + intel_fb_obj_invalidate(obj, ORIGIN_CPU); + obj->dirty = 1; + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); return ret; } @@ -737,14 +818,24 @@ i915_gem_gtt_pread(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_vma *vma; struct drm_mm_node node; char __user *user_data; uint64_t remain; uint64_t offset; int ret; - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); - if (ret) { + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (!IS_ERR(vma)) { + node.start = i915_ggtt_offset(vma); + node.allocated = false; + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + vma = ERR_PTR(ret); + } + } + if (IS_ERR(vma)) { ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE); if (ret) goto out; @@ -756,12 +847,6 @@ i915_gem_gtt_pread(struct drm_device *dev, } i915_gem_object_pin_pages(obj); - } else { - node.start = i915_gem_obj_ggtt_offset(obj); - node.allocated = false; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; } ret = i915_gem_object_set_to_gtt_domain(obj, false); @@ -806,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev, * and write to user memory which may result into page * faults, and so we cannot perform this under struct_mutex. 
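/* Editor's sketch of a consumer of the packed flags computed above
 * (CLFLUSH_BEFORE is bit 0, CLFLUSH_AFTER bit 1 - hence the "<< 1");
 * the wrapper, vaddr and len are placeholders and per-page detail is
 * elided: */
static int cpu_write_example(struct drm_i915_gem_object *obj,
			     void *vaddr, unsigned long len)
{
	unsigned int needs_clflush;
	int ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);

	if (ret)
		return ret;
	if (needs_clflush & CLFLUSH_BEFORE)	/* invalidate stale lines */
		drm_clflush_virt_range(vaddr, len);
	/* ... CPU writes through vaddr ... */
	if (needs_clflush & CLFLUSH_AFTER)	/* push writes out for GPU */
		drm_clflush_virt_range(vaddr, len);
	i915_gem_obj_finish_shmem_access(obj);	/* unpin the pages */
	return 0;
}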
*/ - if (slow_user_access(ggtt->mappable, page_base, + if (slow_user_access(&ggtt->mappable, page_base, page_offset, user_data, page_length, false)) { ret = -EFAULT; @@ -838,7 +923,7 @@ out_unpin: i915_gem_object_unpin_pages(obj); remove_mappable_node(&node); } else { - i915_gem_object_ggtt_unpin(obj); + i915_vma_unpin(vma); } out: return ret; @@ -859,19 +944,14 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - if (!i915_gem_object_has_struct_page(obj)) - return -ENODEV; - - user_data = u64_to_user_ptr(args->data_ptr); - remain = args->size; - - obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); if (ret) return ret; + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; + remain = args->size; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, offset >> PAGE_SHIFT) { @@ -927,7 +1007,7 @@ next_page: } out: - i915_gem_object_unpin_pages(obj); + i915_gem_obj_finish_shmem_access(obj); return ret; } @@ -1036,6 +1116,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, { struct i915_ggtt *ggtt = &i915->ggtt; struct drm_device *dev = obj->base.dev; + struct i915_vma *vma; struct drm_mm_node node; uint64_t remain, offset; char __user *user_data; @@ -1045,9 +1126,18 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, if (i915_gem_object_is_tiled(obj)) return -EFAULT; - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONBLOCK); - if (ret) { + if (!IS_ERR(vma)) { + node.start = i915_ggtt_offset(vma); + node.allocated = false; + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + vma = ERR_PTR(ret); + } + } + if (IS_ERR(vma)) { ret = insert_mappable_node(i915, &node, PAGE_SIZE); if (ret) goto out; @@ -1059,19 +1149,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, } i915_gem_object_pin_pages(obj); - } else { - node.start = i915_gem_obj_ggtt_offset(obj); - node.allocated = false; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; } ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) goto out_unpin; - intel_fb_obj_invalidate(obj, ORIGIN_GTT); + intel_fb_obj_invalidate(obj, ORIGIN_CPU); obj->dirty = true; user_data = u64_to_user_ptr(args->data_ptr); @@ -1103,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, * If the object is non-shmem backed, we retry again with the * path that handles page fault. 
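[Editor's aside] Several paths in this patch stop returning an int plus an out-pointer and instead return the vma itself with any error encoded in it. The kernel's ERR_PTR()/IS_ERR() idiom works because the top page of the address space is never a valid pointer; a self-contained userspace toy of the idiom, with MAX_ERRNO taken as 4095 as in the kernel:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for i915_gem_object_ggtt_pin(): one value carries
     * either the object or the errno.
     */
    static void *toy_pin(int aperture_full)
    {
        static int object;

        return aperture_full ? ERR_PTR(-ENOSPC) : (void *)&object;
    }

    int main(void)
    {
        void *vma = toy_pin(1);

        if (IS_ERR(vma))
            printf("pin failed: %ld, taking fallback path\n", PTR_ERR(vma));
        return 0;
    }

This is why the pread/pwrite fast paths above can try the mappable pin and fall back to insert_mappable_node() behind a single IS_ERR() check.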
*/ - if (fast_user_write(ggtt->mappable, page_base, + if (fast_user_write(&ggtt->mappable, page_base, page_offset, user_data, page_length)) { hit_slow_path = true; mutex_unlock(&dev->struct_mutex); - if (slow_user_access(ggtt->mappable, + if (slow_user_access(&ggtt->mappable, page_base, page_offset, user_data, page_length, true)) { @@ -1138,7 +1222,7 @@ out_flush: } } - intel_fb_obj_flush(obj, false, ORIGIN_GTT); + intel_fb_obj_flush(obj, false, ORIGIN_CPU); out_unpin: if (node.allocated) { wmb(); @@ -1148,7 +1232,7 @@ out_unpin: i915_gem_object_unpin_pages(obj); remove_mappable_node(&node); } else { - i915_gem_object_ggtt_unpin(obj); + i915_vma_unpin(vma); } out: return ret; @@ -1231,42 +1315,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; - int needs_clflush_after = 0; - int needs_clflush_before = 0; + unsigned int needs_clflush; struct sg_page_iter sg_iter; - user_data = u64_to_user_ptr(args->data_ptr); - remain = args->size; - - obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - ret = i915_gem_object_wait_rendering(obj, false); + ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); if (ret) return ret; - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - /* If we're not in the cpu write domain, set ourself into the gtt - * write domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will use the data - * right away and we therefore have to clflush anyway. */ - needs_clflush_after = cpu_write_needs_clflush(obj); - } - /* Same trick applies to invalidate partially written cachelines read - * before writing. */ - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) - needs_clflush_before = - !cpu_cache_is_coherent(dev, obj->cache_level); - - ret = i915_gem_object_get_pages(obj); - if (ret) - return ret; - - intel_fb_obj_invalidate(obj, ORIGIN_CPU); - - i915_gem_object_pin_pages(obj); - + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; - obj->dirty = 1; + remain = args->size; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, offset >> PAGE_SHIFT) { @@ -1290,7 +1349,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, /* If we don't overwrite a cacheline completely we need to be * careful to have up-to-date data by first clflushing. Don't * overcomplicate things and flush the entire page. 
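[Editor's aside] The partial_cacheline_write test just below ORs the page offset with the write length: the result has low bits set unless both are multiples of the cacheline size, i.e. unless the write overwrites whole lines only. A quick demonstration, assuming the typical 64-byte x86 flush granularity:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int clflush_size = 64; /* typical x86 line size */
        const struct { unsigned int off, len; } t[] = {
            { 0, 64 }, { 0, 100 }, { 32, 64 }, { 128, 256 },
        };

        for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++)
            printf("off=%-3u len=%-3u partial=%u\n", t[i].off, t[i].len,
                   !!((t[i].off | t[i].len) & (clflush_size - 1)));
        return 0;
    }

Only the aligned, whole-line cases report partial=0 and can skip the pre-flush.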
*/ - partial_cacheline_write = needs_clflush_before && + partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE && ((shmem_page_offset | page_length) & (boot_cpu_data.x86_clflush_size - 1)); @@ -1300,7 +1359,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, - needs_clflush_after); + needs_clflush & CLFLUSH_AFTER); if (ret == 0) goto next_page; @@ -1309,7 +1368,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, - needs_clflush_after); + needs_clflush & CLFLUSH_AFTER); mutex_lock(&dev->struct_mutex); @@ -1323,7 +1382,7 @@ next_page: } out: - i915_gem_object_unpin_pages(obj); + i915_gem_obj_finish_shmem_access(obj); if (hit_slowpath) { /* @@ -1331,17 +1390,15 @@ out: * cachelines in-line while writing and the object moved * out of the cpu write domain while we've dropped the lock. */ - if (!needs_clflush_after && + if (!(needs_clflush & CLFLUSH_AFTER) && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { if (i915_gem_clflush_object(obj, obj->pin_display)) - needs_clflush_after = true; + needs_clflush |= CLFLUSH_AFTER; } } - if (needs_clflush_after) + if (needs_clflush & CLFLUSH_AFTER) i915_gem_chipset_flush(to_i915(dev)); - else - obj->cache_dirty = true; intel_fb_obj_flush(obj, false, ORIGIN_CPU); return ret; @@ -1420,10 +1477,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret == -EFAULT || ret == -ENOSPC) { if (obj->phys_handle) ret = i915_gem_phys_pwrite(obj, args, file); - else if (i915_gem_object_has_struct_page(obj)) - ret = i915_gem_shmem_pwrite(dev, obj, args, file); else - ret = -ENODEV; + ret = i915_gem_shmem_pwrite(dev, obj, args, file); } i915_gem_object_put(obj); @@ -1439,11 +1494,11 @@ err: return ret; } -static enum fb_op_origin +static inline enum fb_op_origin write_origin(struct drm_i915_gem_object *obj, unsigned domain) { - return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ? - ORIGIN_GTT : ORIGIN_CPU; + return (domain == I915_GEM_DOMAIN_GTT ? + obj->frontbuffer_ggtt_origin : ORIGIN_CPU); } /** @@ -1603,7 +1658,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, up_write(&mm->mmap_sem); /* This may race, but that's ok, it only gets set */ - WRITE_ONCE(obj->has_wc_mmap, true); + WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); } i915_gem_object_put_unlocked(obj); if (IS_ERR((void *)addr)) @@ -1614,9 +1669,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return 0; } +static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) +{ + u64 size; + + size = i915_gem_object_get_stride(obj); + size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8; + + return size >> PAGE_SHIFT; +} + /** * i915_gem_fault - fault a page into the GTT - * @vma: VMA in question + * @area: CPU VMA in question * @vmf: fault info * * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped @@ -1630,20 +1695,21 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * suffer if the GTT working set is large or there are few fence registers * left. 
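[Editor's aside] tile_row_pages() above sizes partial-view chunks in whole tile rows: one row spans the object's stride times the tile height, which in this code is 8 scanlines for X-tiling and 32 for Y-tiling. Worked numbers with a stride picked purely for illustration:

    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12

    static unsigned int toy_tile_row_pages(unsigned long long stride,
                                           int y_tiled)
    {
        unsigned long long size = stride * (y_tiled ? 32 : 8);

        return size >> TOY_PAGE_SHIFT;
    }

    int main(void)
    {
        unsigned long long stride = 16384; /* hypothetical scanout stride */

        printf("X-tiled: %u pages/row, Y-tiled: %u pages/row\n",
               toy_tile_row_pages(stride, 0), toy_tile_row_pages(stride, 1));
        return 0;
    }

For this stride a Y-tiled row is 128 pages, which is why the fault handler grows its chunk to at least one tile row before carving a partial view.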
*/ -int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) { - struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); +#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ + struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_ggtt_view view = i915_ggtt_view_normal; bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + struct i915_vma *vma; pgoff_t page_offset; - unsigned long pfn; + unsigned int flags; int ret; /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >> PAGE_SHIFT; trace_i915_gem_object_fault(obj, page_offset, true, write); @@ -1669,79 +1735,71 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto err_unlock; } - /* Use a partial view if the object is bigger than the aperture. */ - if (obj->base.size >= ggtt->mappable_end && - !i915_gem_object_is_tiled(obj)) { - static const unsigned int chunk_size = 256; // 1 MiB + /* If the object is smaller than a couple of partial vma, it is + * not worth only creating a single partial vma - we may as well + * clear enough space for the full object. + */ + flags = PIN_MAPPABLE; + if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) + flags |= PIN_NONBLOCK | PIN_NONFAULT; + + /* Now pin it into the GTT as needed */ + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + if (IS_ERR(vma)) { + struct i915_ggtt_view view; + unsigned int chunk_size; + + /* Use a partial view if it is bigger than available space */ + chunk_size = MIN_CHUNK_PAGES; + if (i915_gem_object_is_tiled(obj)) + chunk_size = max(chunk_size, tile_row_pages(obj)); memset(&view, 0, sizeof(view)); view.type = I915_GGTT_VIEW_PARTIAL; view.params.partial.offset = rounddown(page_offset, chunk_size); view.params.partial.size = - min_t(unsigned int, - chunk_size, - (vma->vm_end - vma->vm_start)/PAGE_SIZE - + min_t(unsigned int, chunk_size, + (area->vm_end - area->vm_start) / PAGE_SIZE - view.params.partial.offset); - } - /* Now pin it into the GTT if needed */ - ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); - if (ret) + /* If the partial covers the entire object, just create a + * normal VMA. + */ + if (chunk_size >= obj->base.size >> PAGE_SHIFT) + view.type = I915_GGTT_VIEW_NORMAL; + + /* Userspace is now writing through an untracked VMA, abandon + * all hope that the hardware is able to track future writes. + */ + obj->frontbuffer_ggtt_origin = ORIGIN_CPU; + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + } + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto err_unlock; + } ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret) goto err_unpin; - ret = i915_gem_object_get_fence(obj); + ret = i915_vma_get_fence(vma); if (ret) goto err_unpin; /* Finally, remap it using the new GTT offset */ - pfn = ggtt->mappable_base + - i915_gem_obj_ggtt_offset_view(obj, &view); - pfn >>= PAGE_SHIFT; - - if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) { - /* Overriding existing pages in partial view does not cause - * us any trouble as TLBs are still valid because the fault - * is due to userspace losing part of the mapping or never - * having accessed it before (at this partials' range). 
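[Editor's aside] The fault-handler rework above centres a fixed-size window on the faulting page: round the page index down to a chunk boundary, then clamp the window so it cannot run past the user's mapping. The arithmetic in isolation, with all values hypothetical (256 pages matches the 1 MiB MIN_CHUNK_PAGES defined above under 4K pages):

    #include <stdio.h>

    static unsigned int toy_rounddown(unsigned int x, unsigned int m)
    {
        return x - (x % m);
    }

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int chunk = 256;      /* pages per partial-view chunk */
        unsigned int vma_pages = 1000; /* size of the user's mmap */
        unsigned int fault_page = 700; /* page index that faulted */

        unsigned int offset = toy_rounddown(fault_page, chunk);
        unsigned int size = min_u(chunk, vma_pages - offset);

        printf("partial view: pages [%u, %u)\n", offset, offset + size);
        return 0;
    }

Here the fault at page 700 yields the window [512, 768), so neighbouring faults within the same chunk reuse one binding instead of thrashing the aperture.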
- */ - unsigned long base = vma->vm_start + - (view.params.partial.offset << PAGE_SHIFT); - unsigned int i; - - for (i = 0; i < view.params.partial.size; i++) { - ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i); - if (ret) - break; - } - - obj->fault_mappable = true; - } else { - if (!obj->fault_mappable) { - unsigned long size = min_t(unsigned long, - vma->vm_end - vma->vm_start, - obj->base.size); - int i; - - for (i = 0; i < size >> PAGE_SHIFT; i++) { - ret = vm_insert_pfn(vma, - (unsigned long)vma->vm_start + i * PAGE_SIZE, - pfn + i); - if (ret) - break; - } + ret = remap_io_mapping(area, + area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT), + (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, + min_t(u64, vma->size, area->vm_end - area->vm_start), + &ggtt->mappable); + if (ret) + goto err_unpin; - obj->fault_mappable = true; - } else - ret = vm_insert_pfn(vma, - (unsigned long)vmf->virtual_address, - pfn + page_offset); - } + obj->fault_mappable = true; err_unpin: - i915_gem_object_ggtt_unpin_view(obj, &view); + __i915_vma_unpin(vma); err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: @@ -2077,10 +2135,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) list_del(&obj->global_list); if (obj->mapping) { - if (is_vmalloc_addr(obj->mapping)) - vunmap(obj->mapping); + void *ptr; + + ptr = ptr_mask_bits(obj->mapping); + if (is_vmalloc_addr(ptr)) + vunmap(ptr); else - kunmap(kmap_to_page(obj->mapping)); + kunmap(kmap_to_page(ptr)); + obj->mapping = NULL; } @@ -2253,7 +2315,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) } /* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, + enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT; struct sg_table *sgt = obj->pages; @@ -2262,10 +2325,11 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) struct page *stack_pages[32]; struct page **pages = stack_pages; unsigned long i = 0; + pgprot_t pgprot; void *addr; /* A single page can always be kmapped */ - if (n_pages == 1) + if (n_pages == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); if (n_pages > ARRAY_SIZE(stack_pages)) { @@ -2281,7 +2345,15 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) /* Check that we have the expected number of pages */ GEM_BUG_ON(i != n_pages); - addr = vmap(pages, n_pages, 0, PAGE_KERNEL); + switch (type) { + case I915_MAP_WB: + pgprot = PAGE_KERNEL; + break; + case I915_MAP_WC: + pgprot = pgprot_writecombine(PAGE_KERNEL_IO); + break; + } + addr = vmap(pages, n_pages, 0, pgprot); if (pages != stack_pages) drm_free_large(pages); @@ -2290,27 +2362,54 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) } /* get, pin, and map the pages of the object into kernel space */ -void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type) { + enum i915_map_type has_type; + bool pinned; + void *ptr; int ret; lockdep_assert_held(&obj->base.dev->struct_mutex); + GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); ret = i915_gem_object_get_pages(obj); if (ret) return ERR_PTR(ret); i915_gem_object_pin_pages(obj); + pinned = obj->pages_pin_count > 1; - if (!obj->mapping) { - obj->mapping = i915_gem_object_map(obj); - if (!obj->mapping) { - i915_gem_object_unpin_pages(obj); - return 
ERR_PTR(-ENOMEM); + ptr = ptr_unpack_bits(obj->mapping, has_type); + if (ptr && has_type != type) { + if (pinned) { + ret = -EBUSY; + goto err; } + + if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); + + ptr = obj->mapping = NULL; + } + + if (!ptr) { + ptr = i915_gem_object_map(obj, type); + if (!ptr) { + ret = -ENOMEM; + goto err; + } + + obj->mapping = ptr_pack_bits(ptr, type); } - return obj->mapping; + return ptr; + +err: + i915_gem_object_unpin_pages(obj); + return ERR_PTR(ret); } static void @@ -2423,15 +2522,11 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) struct drm_i915_gem_request *request; struct intel_ring *ring; - request = i915_gem_active_peek(&engine->last_request, - &engine->i915->drm.struct_mutex); - /* Mark all pending requests as complete so that any concurrent * (lockless) lookup doesn't try and wait upon the request as we * reset it. */ - if (request) - intel_engine_init_seqno(engine, request->fence.seqno); + intel_engine_init_seqno(engine, engine->last_submitted_seqno); /* * Clear the execlists queue up before freeing the requests, as those @@ -2453,6 +2548,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) * implicit references on things like e.g. ppgtt address spaces through * the request. */ + request = i915_gem_active_raw(&engine->last_request, + &engine->i915->drm.struct_mutex); if (request) i915_gem_request_retire_upto(request); GEM_BUG_ON(intel_engine_is_active(engine)); @@ -2526,7 +2623,6 @@ i915_gem_idle_work_handler(struct work_struct *work) container_of(work, typeof(*dev_priv), gt.idle_work.work); struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; - unsigned int stuck_engines; bool rearm_hangcheck; if (!READ_ONCE(dev_priv->gt.awake)) @@ -2556,15 +2652,6 @@ i915_gem_idle_work_handler(struct work_struct *work) dev_priv->gt.awake = false; rearm_hangcheck = false; - /* As we have disabled hangcheck, we need to unstick any waiters still - * hanging around. However, as we may be racing against the interrupt - * handler or the waiters themselves, we skip enabling the fake-irq. 
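[Editor's aside] The pin_map() rework above keeps at most one kernel mapping per object, with the mapping type tagged into the pointer's low bits; a request for the other type tears the old map down, but only when no other caller still holds it. A stripped-down userspace model of that policy, with malloc standing in for kmap/vmap and all names invented:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum toy_map_type { TOY_MAP_WB, TOY_MAP_WC };

    struct toy_obj {
        void *mapping;
        enum toy_map_type type;
        int pin_count;
    };

    static void *toy_pin_map(struct toy_obj *obj, enum toy_map_type type)
    {
        obj->pin_count++;                 /* pages pinned first */

        if (obj->mapping && obj->type != type) {
            if (obj->pin_count > 1) {     /* someone else holds the map */
                obj->pin_count--;
                errno = EBUSY;
                return NULL;
            }
            free(obj->mapping);           /* kernel: vunmap()/kunmap() */
            obj->mapping = NULL;
        }

        if (!obj->mapping) {
            obj->mapping = malloc(4096);  /* kernel: kmap() or vmap() */
            obj->type = type;
        }
        return obj->mapping;
    }

    int main(void)
    {
        struct toy_obj obj = { 0 };

        void *wb = toy_pin_map(&obj, TOY_MAP_WB);
        void *wc = toy_pin_map(&obj, TOY_MAP_WC); /* EBUSY: WB still pinned */

        printf("wb=%p wc=%p errno=%d\n", wb, wc, wc ? 0 : errno);
        return 0;
    }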
- */ - stuck_engines = intel_kick_waiters(dev_priv); - if (unlikely(stuck_engines)) - DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n", - stuck_engines); - if (INTEL_GEN(dev_priv) >= 6) gen6_rps_idle(dev_priv); intel_runtime_pm_put(dev_priv); @@ -2734,27 +2821,6 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; } -static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) -{ - u32 old_write_domain, old_read_domains; - - /* Force a pagefault for domain tracking on next user access */ - i915_gem_release_mmap(obj); - - if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) - return; - - old_read_domains = obj->base.read_domains; - old_write_domain = obj->base.write_domain; - - obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; - obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; - - trace_i915_gem_object_change_domain(obj, - old_read_domains, - old_write_domain); -} - static void __i915_vma_iounmap(struct i915_vma *vma) { GEM_BUG_ON(i915_vma_is_pinned(vma)); @@ -2809,16 +2875,17 @@ int i915_vma_unbind(struct i915_vma *vma) GEM_BUG_ON(obj->bind_count == 0); GEM_BUG_ON(!obj->pages); - if (i915_vma_is_ggtt(vma) && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { - i915_gem_object_finish_gtt(obj); - + if (i915_vma_is_map_and_fenceable(vma)) { /* release the fence reg _after_ flushing */ - ret = i915_gem_object_put_fence(obj); + ret = i915_vma_put_fence(vma); if (ret) return ret; + /* Force a pagefault for domain tracking on next user access */ + i915_gem_release_mmap(obj); + __i915_vma_iounmap(vma); + vma->flags &= ~I915_VMA_CAN_FENCE; } if (likely(!vma->vm->closed)) { @@ -2830,15 +2897,12 @@ int i915_vma_unbind(struct i915_vma *vma) drm_mm_remove_node(&vma->node); list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - if (i915_vma_is_ggtt(vma)) { - if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { - obj->map_and_fenceable = false; - } else if (vma->ggtt_view.pages) { - sg_free_table(vma->ggtt_view.pages); - kfree(vma->ggtt_view.pages); - } - vma->ggtt_view.pages = NULL; + if (vma->pages != obj->pages) { + GEM_BUG_ON(!vma->pages); + sg_free_table(vma->pages); + kfree(vma->pages); } + vma->pages = NULL; /* Since the unbound list is global, only move to that list if * no more VMAs exist. */ @@ -2930,7 +2994,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); struct drm_i915_gem_object *obj = vma->obj; u64 start, end; - u64 min_alignment; int ret; GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); @@ -2941,17 +3004,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) size = i915_gem_get_ggtt_size(dev_priv, size, i915_gem_object_get_tiling(obj)); - min_alignment = - i915_gem_get_ggtt_alignment(dev_priv, size, - i915_gem_object_get_tiling(obj), - flags & PIN_MAPPABLE); - if (alignment == 0) - alignment = min_alignment; - if (alignment & (min_alignment - 1)) { - DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n", - alignment, min_alignment); - return -EINVAL; - } + alignment = max(max(alignment, vma->display_alignment), + i915_gem_get_ggtt_alignment(dev_priv, size, + i915_gem_object_get_tiling(obj), + flags & PIN_MAPPABLE)); start = flags & PIN_OFFSET_BIAS ? 
flags & PIN_OFFSET_MASK : 0; @@ -3091,51 +3147,72 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { - uint32_t old_write_domain; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; /* No actual flushing is required for the GTT write domain. Writes - * to it immediately go to main memory as far as we know, so there's + * to it "immediately" go to main memory as far as we know, so there's * no chipset flush. It also doesn't land in render cache. * * However, we do have to enforce the order so that all writes through * the GTT land before any writes to the device, such as updates to * the GATT itself. + * + * We also have to wait a bit for the writes to land from the GTT. + * An uncached read (i.e. mmio) seems to be ideal for the round-trip + * timing. This issue has only been observed when switching quickly + * between GTT writes and CPU reads from inside the kernel on recent hw, + * and it appears to only affect discrete GTT blocks (i.e. on LLC + * system agents we cannot reproduce this behaviour). */ wmb(); + if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) + POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base)); - old_write_domain = obj->base.write_domain; - obj->base.write_domain = 0; - - intel_fb_obj_flush(obj, false, ORIGIN_GTT); + intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT)); + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, obj->base.read_domains, - old_write_domain); + I915_GEM_DOMAIN_GTT); } /** Flushes the CPU write domain for the object if it's dirty. */ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { - uint32_t old_write_domain; - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; if (i915_gem_clflush_object(obj, obj->pin_display)) i915_gem_chipset_flush(to_i915(obj->base.dev)); - old_write_domain = obj->base.write_domain; - obj->base.write_domain = 0; - intel_fb_obj_flush(obj, false, ORIGIN_CPU); + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, obj->base.read_domains, - old_write_domain); + I915_GEM_DOMAIN_CPU); +} + +static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!i915_vma_is_ggtt(vma)) + continue; + + if (i915_vma_is_active(vma)) + continue; + + if (!drm_mm_node_allocated(&vma->node)) + continue; + + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + } } /** @@ -3150,7 +3227,6 @@ int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; - struct i915_vma *vma; int ret; ret = i915_gem_object_wait_rendering(obj, !write); @@ -3200,11 +3276,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) old_write_domain); /* And bump the LRU for this access */ - vma = i915_gem_obj_to_ggtt(obj); - if (vma && - drm_mm_node_allocated(&vma->node) && - !i915_vma_is_active(vma)) - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + i915_gem_object_bump_inactive_ggtt(obj); return 0; } @@ -3295,9 +3367,11 @@ restart: * dropped the fence as all snoopable access is * supposed to be linear. 
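[Editor's aside] The new i915_gem_object_bump_inactive_ggtt() above is a plain LRU bump: moving a vma to the tail of the inactive list makes it the freshest entry, so an eviction scan walking from the head reaches it last. The list primitive at work, in miniature:

    #include <stdio.h>

    struct node { struct node *prev, *next; const char *name; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_tail(struct node *n, struct node *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    static void list_move_tail(struct node *n, struct node *h)
    {
        list_del(n);
        list_add_tail(n, h);
    }

    int main(void)
    {
        struct node head, a = { .name = "a" }, b = { .name = "b" };

        list_init(&head);
        list_add_tail(&a, &head);
        list_add_tail(&b, &head);
        list_move_tail(&a, &head); /* "a" was just used: bump it */
        for (struct node *n = head.next; n != &head; n = n->next)
            printf("%s ", n->name); /* prints: b a */
        printf("\n");
        return 0;
    }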
*/ - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; + list_for_each_entry(vma, &obj->vma_list, obj_link) { + ret = i915_vma_put_fence(vma); + if (ret) + return ret; + } } else { /* We either have incoherent backing store and * so no GTT access or the architecture is fully @@ -3424,11 +3498,12 @@ rpm_put: * Can be called from an uninterruptible phase (modesetting) and allows * any flushes to be pipelined (for pageflips). */ -int +struct i915_vma * i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view) { + struct i915_vma *vma; u32 old_read_domains, old_write_domain; int ret; @@ -3448,19 +3523,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, */ ret = i915_gem_object_set_cache_level(obj, HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); - if (ret) + if (ret) { + vma = ERR_PTR(ret); goto err_unpin_display; + } /* As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we - * always use map_and_fenceable for all scanout buffers. + * always use map_and_fenceable for all scanout buffers. However, + * it may simply be too big to fit into mappable, in which case + * put it anyway and hope that userspace can cope (but always first + * try to preserve the existing ABI). */ - ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment, - view->type == I915_GGTT_VIEW_NORMAL ? - PIN_MAPPABLE : 0); - if (ret) + vma = ERR_PTR(-ENOSPC); + if (view->type == I915_GGTT_VIEW_NORMAL) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, + PIN_MAPPABLE | PIN_NONBLOCK); + if (IS_ERR(vma)) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); + if (IS_ERR(vma)) goto err_unpin_display; + vma->display_alignment = max_t(u64, vma->display_alignment, alignment); + + WARN_ON(obj->pin_display > i915_vma_pin_count(vma)); + i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -3476,23 +3563,28 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, old_read_domains, old_write_domain); - return 0; + return vma; err_unpin_display: obj->pin_display--; - return ret; + return vma; } void -i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) +i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - if (WARN_ON(obj->pin_display == 0)) + if (WARN_ON(vma->obj->pin_display == 0)) return; - i915_gem_object_ggtt_unpin_view(obj, view); + if (--vma->obj->pin_display == 0) + vma->display_alignment = 0; - obj->pin_display--; + /* Bump the LRU to try and avoid premature eviction whilst flipping */ + if (!i915_vma_is_active(vma)) + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + + i915_vma_unpin(vma); + WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma)); } /** @@ -3605,8 +3697,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) static bool i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - struct drm_i915_gem_object *obj = vma->obj; - if (!drm_mm_node_allocated(&vma->node)) return false; @@ -3616,7 +3706,7 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) if (alignment && vma->node.start & (alignment - 1)) return true; - if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) + if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) return true; if (flags & PIN_OFFSET_BIAS && @@ -3638,10 +3728,10 @@ void 
__i915_vma_set_map_and_fenceable(struct i915_vma *vma) u32 fence_size, fence_alignment; fence_size = i915_gem_get_ggtt_size(dev_priv, - obj->base.size, + vma->size, i915_gem_object_get_tiling(obj)); fence_alignment = i915_gem_get_ggtt_alignment(dev_priv, - obj->base.size, + vma->size, i915_gem_object_get_tiling(obj), true); @@ -3651,7 +3741,10 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) mappable = (vma->node.start + fence_size <= dev_priv->ggtt.mappable_end); - obj->map_and_fenceable = mappable && fenceable; + if (mappable && fenceable) + vma->flags |= I915_VMA_CAN_FENCE; + else + vma->flags &= ~I915_VMA_CAN_FENCE; } int __i915_vma_do_pin(struct i915_vma *vma, @@ -3689,53 +3782,46 @@ err: return ret; } -int +struct i915_vma * i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, u64 size, u64 alignment, u64 flags) { + struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base; struct i915_vma *vma; int ret; - if (!view) - view = &i915_ggtt_view_normal; - - vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view); + vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view); if (IS_ERR(vma)) - return PTR_ERR(vma); + return vma; if (i915_vma_misplaced(vma, size, alignment, flags)) { if (flags & PIN_NONBLOCK && (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) - return -ENOSPC; + return ERR_PTR(-ENOSPC); WARN(i915_vma_is_pinned(vma), "bo is already pinned in ggtt with incorrect alignment:" - " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d," - " obj->map_and_fenceable=%d\n", - upper_32_bits(vma->node.start), - lower_32_bits(vma->node.start), - alignment, + " offset=%08x, req.alignment=%llx," + " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", + i915_ggtt_offset(vma), alignment, !!(flags & PIN_MAPPABLE), - obj->map_and_fenceable); + i915_vma_is_map_and_fenceable(vma)); ret = i915_vma_unbind(vma); if (ret) - return ret; + return ERR_PTR(ret); } - return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); -} + ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + if (ret) + return ERR_PTR(ret); -void -i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) -{ - i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view)); + return vma; } -static __always_inline unsigned __busy_read_flag(unsigned int id) +static __always_inline unsigned int __busy_read_flag(unsigned int id) { /* Note that we could alias engines in the execbuf API, but * that would be very unwise as it prevents userspace from @@ -3750,39 +3836,92 @@ static __always_inline unsigned __busy_read_flag(unsigned int id) static __always_inline unsigned int __busy_write_id(unsigned int id) { - return id; + /* The uABI guarantees an active writer is also amongst the read + * engines. This would be true if we accessed the activity tracking + * under the lock, but as we perform the lookup of the object and + * its activity locklessly we can not guarantee that the last_write + * being active implies that we have set the same engine flag from + * last_read - hence we always set both read and write busy for + * last_write. + */ + return id | __busy_read_flag(id); } -static __always_inline unsigned +static __always_inline unsigned int __busy_set_if_active(const struct i915_gem_active *active, unsigned int (*flag)(unsigned int id)) { - /* For more discussion about the barriers and locking concerns, - * see __i915_gem_active_get_rcu(). 
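[Editor's aside] __busy_write_id() above now folds the writer into the read mask, so the uABI promise quoted in the comment, that an active writer is also amongst the read engines, holds even under the lockless lookup. Assuming the customary layout of this report (one read bit per engine in the high 16 bits, the writer's id in the low 16; the actual __busy_read_flag() body is not shown in this hunk), the encoding decodes like so:

    #include <stdio.h>

    /* Assumed layout, for illustration only. */
    static unsigned int busy_read_flag(unsigned int id)
    {
        return 0x10000 << id;
    }

    static unsigned int busy_write_id(unsigned int id)
    {
        return id | busy_read_flag(id); /* writer implies reader */
    }

    int main(void)
    {
        unsigned int busy = 0;

        busy |= busy_read_flag(1);  /* engine 1 reading */
        busy |= busy_write_id(3);   /* engine 3 writing */

        printf("read mask=%#x writer id=%u\n", busy >> 16, busy & 0xffff);
        return 0;
    }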
- */ - do { - struct drm_i915_gem_request *request; - unsigned int id; - - request = rcu_dereference(active->request); - if (!request || i915_gem_request_completed(request)) - return 0; + struct drm_i915_gem_request *request; - id = request->engine->exec_id; + request = rcu_dereference(active->request); + if (!request || i915_gem_request_completed(request)) + return 0; - /* Check that the pointer wasn't reassigned and overwritten. */ - if (request == rcu_access_pointer(active->request)) - return flag(id); - } while (1); + /* This is racy. See __i915_gem_active_get_rcu() for an in detail + * discussion of how to handle the race correctly, but for reporting + * the busy state we err on the side of potentially reporting the + * wrong engine as being busy (but we guarantee that the result + * is at least self-consistent). + * + * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated + * whilst we are inspecting it, even under the RCU read lock as we are. + * This means that there is a small window for the engine and/or the + * seqno to have been overwritten. The seqno will always be in the + * future compared to the intended, and so we know that if that + * seqno is idle (on whatever engine) our request is idle and the + * return 0 above is correct. + * + * The issue is that if the engine is switched, it is just as likely + * to report that it is busy (but since the switch happened, we know + * the request should be idle). So there is a small chance that a busy + * result is actually the wrong engine. + * + * So why don't we care? + * + * For starters, the busy ioctl is a heuristic that is by definition + * racy. Even with perfect serialisation in the driver, the hardware + * state is constantly advancing - the state we report to the user + * is stale. + * + * The critical information for the busy-ioctl is whether the object + * is idle as userspace relies on that to detect whether its next + * access will stall, or if it has missed submitting commands to + * the hardware allowing the GPU to stall. We never generate a + * false-positive for idleness, thus busy-ioctl is reliable at the + * most fundamental level, and we maintain the guarantee that a + * busy object left to itself will eventually become idle (and stay + * idle!). + * + * We allow ourselves the leeway of potentially misreporting the busy + * state because that is an optimisation heuristic that is constantly + * in flux. Being quickly able to detect the busy/idle state is much + * more important than accurate logging of exactly which engines were + * busy. + * + * For accuracy in reporting the engine, we could use + * + * result = 0; + * request = __i915_gem_active_get_rcu(active); + * if (request) { + * if (!i915_gem_request_completed(request)) + * result = flag(request->engine->exec_id); + * i915_gem_request_put(request); + * } + * + * but that still remains susceptible to both hardware and userspace + * races. So we accept making the result of that race slightly worse, + * given the rarity of the race and its low impact on the result. 
+ */ + return flag(READ_ONCE(request->engine->exec_id)); } -static inline unsigned +static __always_inline unsigned int busy_check_reader(const struct i915_gem_active *active) { return __busy_set_if_active(active, __busy_read_flag); } -static inline unsigned +static __always_inline unsigned int busy_check_writer(const struct i915_gem_active *active) { return __busy_set_if_active(active, __busy_write_id); @@ -3821,11 +3960,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * retired and freed. We take a local copy of the pointer, * but before we add its engine into the busy set, the other * thread reallocates it and assigns it to a task on another - * engine with a fresh and incomplete seqno. - * - * So after we lookup the engine's id, we double check that - * the active request is the same and only then do we add it - * into the busy set. + * engine with a fresh and incomplete seqno. Guarding against + * that requires careful serialisation and reference counting, + * i.e. using __i915_gem_active_get_request_rcu(). We don't, + * instead we expect that if the result is busy, which engines + * are busy is not completely reliable - we only guarantee + * that the object was busy. */ rcu_read_lock(); @@ -3833,9 +3973,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, args->busy |= busy_check_reader(&obj->last_read[idx]); /* For ABI sanity, we only care that the write engine is in - * the set of read engines. This is ensured by the ordering - * of setting last_read/last_write in i915_vma_move_to_active, - * and then in reverse in retire. + * the set of read engines. This should be ensured by the + * ordering of setting last_read/last_write in + * i915_vma_move_to_active(), and then in reverse in retire. + * However, for good measure, we always report the last_write + * request as a busy read as well as being a busy write. 
* * We don't care that the set of active read/write engines * may change during construction of the result, as it is @@ -3920,14 +4062,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, i915_gem_object_retire__read); init_request_active(&obj->last_write, i915_gem_object_retire__write); - init_request_active(&obj->last_fence, NULL); INIT_LIST_HEAD(&obj->obj_exec_link); INIT_LIST_HEAD(&obj->vma_list); INIT_LIST_HEAD(&obj->batch_pool_link); obj->ops = ops; - obj->fence_reg = I915_FENCE_REG_NONE; + obj->frontbuffer_ggtt_origin = ORIGIN_GTT; obj->madv = I915_MADV_WILLNEED; i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); @@ -4082,32 +4223,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) intel_runtime_pm_put(dev_priv); } -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && - vma->vm == vm) - return vma; - } - return NULL; -} - -struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - GEM_BUG_ON(!view); - - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (i915_vma_is_ggtt(vma) && - i915_ggtt_view_equal(&vma->ggtt_view, view)) - return vma; - return NULL; -} - int i915_gem_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -4383,6 +4498,7 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; + int i; if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) @@ -4398,6 +4514,13 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) I915_READ(vgtif_reg(avail_rs.fence_num)); /* Initialize fence registers to zero */ + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; + + fence->i915 = dev_priv; + fence->id = i; + list_add_tail(&fence->link, &dev_priv->mm.fence_list); + } i915_gem_restore_fences(dev); i915_gem_detect_bit_6_swizzle(dev); @@ -4433,8 +4556,6 @@ i915_gem_load_init(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.fence_list); for (i = 0; i < I915_NUM_ENGINES; i++) init_engine_lists(&dev_priv->engine[i]); - for (i = 0; i < I915_MAX_NUM_FENCES; i++) - INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->gt.retire_work, i915_gem_retire_work_handler); INIT_DELAYED_WORK(&dev_priv->gt.idle_work, @@ -4444,8 +4565,6 @@ i915_gem_load_init(struct drm_device *dev) dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - INIT_LIST_HEAD(&dev_priv->mm.fence_list); - init_waitqueue_head(&dev_priv->pending_flip_queue); dev_priv->mm.interruptible = true; @@ -4575,97 +4694,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } } -/* All the new VM stuff */ -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_vma *vma; - - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (i915_vma_is_ggtt(vma) && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) - return vma->node.start; - } - - WARN(1, "%s vma for this object not found.\n", - i915_is_ggtt(vm) ? 
"global" : "ppgtt"); - return -1; -} - -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) - if (i915_vma_is_ggtt(vma) && - i915_ggtt_view_equal(&vma->ggtt_view, view)) - return vma->node.start; - - WARN(1, "global vma for this object not found. (view=%u)\n", view->type); - return -1; -} - -bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (i915_vma_is_ggtt(vma) && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) - return true; - } - - return false; -} - -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) - if (i915_vma_is_ggtt(vma) && - i915_ggtt_view_equal(&vma->ggtt_view, view) && - drm_mm_node_allocated(&vma->node)) - return true; - - return false; -} - -unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) -{ - struct i915_vma *vma; - - GEM_BUG_ON(list_empty(&o->vma_list)); - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (i915_vma_is_ggtt(vma) && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - return vma->node.size; - } - - return 0; -} - -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) -{ - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (i915_vma_is_pinned(vma)) - return true; - - return false; -} - /* Like i915_gem_object_get_page(), but mark the returned page dirty */ struct page * i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index bb72af5..35950ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref) if (ce->ring) intel_ring_free(ce->ring); - i915_gem_object_put(ce->state); + i915_vma_put(ce->state); } + put_pid(ctx->pid); list_del(&ctx->link); ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); @@ -281,13 +282,24 @@ __create_hw_context(struct drm_device *dev, ctx->ggtt_alignment = get_context_alignment(dev_priv); if (dev_priv->hw_context_size) { - struct drm_i915_gem_object *obj = - i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_alloc_context_obj(dev, + dev_priv->hw_context_size); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto err_out; } - ctx->engine[RCS].state = obj; + + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + ret = PTR_ERR(vma); + goto err_out; + } + + ctx->engine[RCS].state = vma; } /* Default context will never have a file_priv */ @@ -300,6 +312,9 @@ __create_hw_context(struct drm_device *dev, ret = DEFAULT_CONTEXT_HANDLE; ctx->file_priv = file_priv; + if (file_priv) + ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->user_handle = ret; /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. 
If there @@ -399,7 +414,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx, struct intel_context *ce = &ctx->engine[engine->id]; if (ce->state) - i915_gem_object_ggtt_unpin(ce->state); + i915_vma_unpin(ce->state); i915_gem_context_put(ctx); } @@ -568,7 +583,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ i915.semaphores ? - hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : + INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len, ret; @@ -621,8 +636,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, - i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | - flags); + i915_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -651,7 +665,8 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); intel_ring_emit_reg(ring, last_reg); - intel_ring_emit(ring, engine->scratch.gtt_offset); + intel_ring_emit(ring, + i915_ggtt_offset(engine->scratch)); intel_ring_emit(ring, MI_NOOP); } intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); @@ -755,6 +770,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) struct i915_gem_context *to = req->ctx; struct intel_engine_cs *engine = req->engine; struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; + struct i915_vma *vma = to->engine[RCS].state; struct i915_gem_context *from; u32 hw_flags; int ret, i; @@ -762,9 +778,15 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (skip_rcs_switch(ppgtt, engine, to)) return 0; + /* Clear this page out of any CPU caches for coherent swap-in/out. */ + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { + ret = i915_gem_object_set_to_gtt_domain(vma->obj, false); + if (ret) + return ret; + } + /* Trying to pin first makes error handling easier. */ - ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0, - to->ggtt_alignment, 0); + ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL); if (ret) return ret; @@ -777,18 +799,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) */ from = engine->last_context; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. Note - * that thanks to write = false in this call and us not setting any gpu - * write domains when putting a context object onto the active list - * (when switching away from it), this won't block. - * - * XXX: We need a real interface to do this instead of trickery. 
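[Editor's aside] do_rcs_switch() above now flushes the context image and takes its pin before anything that can fail later, unwinding through a single err label; that is the point of the "trying to pin first makes error handling easier" remark. The shape of that idiom, reduced to a toy:

    #include <stdio.h>

    static int acquire(const char *what, int fail)
    {
        if (fail) {
            printf("failed to acquire %s\n", what);
            return -1;
        }
        printf("acquired %s\n", what);
        return 0;
    }

    static void release(const char *what)
    {
        printf("released %s\n", what);
    }

    static int toy_switch(int fail_mm)
    {
        int ret;

        ret = acquire("context pin", 0); /* pin first... */
        if (ret)
            return ret;

        ret = acquire("mm switch", fail_mm);
        if (ret)
            goto err;                    /* ...so errors unwind here */

        release("context pin");          /* normal hand-over path */
        return 0;

    err:
        release("context pin");
        return ret;
    }

    int main(void)
    {
        return toy_switch(1) ? 1 : 0;
    }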
- */ - ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); - if (ret) - goto unpin_out; - if (needs_pd_load_pre(ppgtt, engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load @@ -797,7 +807,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) trace_switch_mm(engine, to); ret = ppgtt->switch_mm(ppgtt, req); if (ret) - goto unpin_out; + goto err; } if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) @@ -814,7 +824,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (to != from || (hw_flags & MI_FORCE_RESTORE)) { ret = mi_set_context(req, hw_flags); if (ret) - goto unpin_out; + goto err; } /* The backing object for the context is done after switching to the @@ -824,8 +834,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - struct drm_i915_gem_object *obj = from->engine[RCS].state; - /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -833,11 +841,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. */ - obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0); - - /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(obj); + i915_vma_move_to_active(from->engine[RCS].state, req, 0); + /* state is kept alive until the next request */ + i915_vma_unpin(from->engine[RCS].state); i915_gem_context_put(from); } engine->last_context = i915_gem_context_get(to); @@ -882,8 +888,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) return 0; -unpin_out: - i915_gem_object_ggtt_unpin(to->engine[RCS].state); +err: + i915_vma_unpin(vma); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index c60a8d5b..10265bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -119,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) if (ret) return ERR_PTR(ret); - addr = i915_gem_object_pin_map(obj); + addr = i915_gem_object_pin_map(obj, I915_MAP_WB); mutex_unlock(&dev->struct_mutex); return addr; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index f76c06e..815d5fb 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -47,7 +47,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv) } static bool -mark_free(struct i915_vma *vma, struct list_head *unwind) +mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind) { if (i915_vma_is_pinned(vma)) return false; @@ -55,6 +55,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) if (WARN_ON(!list_empty(&vma->exec_list))) return false; + if (flags & PIN_NONFAULT && vma->obj->fault_mappable) + return false; + list_add(&vma->exec_list, unwind); return drm_mm_scan_add_block(&vma->node); } @@ -129,7 +132,7 @@ search_again: phase = phases; do { list_for_each_entry(vma, *phase, vm_link) - if (mark_free(vma, &eviction_list)) + if (mark_free(vma, flags, &eviction_list)) goto found; } while (*++phase); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index c494b79..601156c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -39,6 +39,8 @@ #include "intel_drv.h" #include "intel_frontbuffer.h" +#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */ + #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) #define __EXEC_OBJECT_NEEDS_MAP (1<<29) @@ -59,6 +61,7 @@ struct i915_execbuffer_params { }; struct eb_vmas { + struct drm_i915_private *i915; struct list_head vmas; int and; union { @@ -68,7 +71,8 @@ struct eb_vmas { }; static struct eb_vmas * -eb_create(struct drm_i915_gem_execbuffer2 *args) +eb_create(struct drm_i915_private *i915, + struct drm_i915_gem_execbuffer2 *args) { struct eb_vmas *eb = NULL; @@ -95,6 +99,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) } else eb->and = -args->buffer_count; + eb->i915 = i915; INIT_LIST_HEAD(&eb->vmas); return eb; } @@ -180,8 +185,8 @@ eb_lookup_vmas(struct eb_vmas *eb, * from the (obj, vm) we don't run the risk of creating * duplicated vmas for the same vm. */ - vma = i915_gem_obj_lookup_or_create_vma(obj, vm); - if (IS_ERR(vma)) { + vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL); + if (unlikely(IS_ERR(vma))) { DRM_DEBUG("Failed to lookup VMA\n"); ret = PTR_ERR(vma); goto err; @@ -245,7 +250,6 @@ static void i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry; - struct drm_i915_gem_object *obj = vma->obj; if (!drm_mm_node_allocated(&vma->node)) return; @@ -253,7 +257,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) entry = vma->exec_entry; if (entry->flags & __EXEC_OBJECT_HAS_FENCE) - i915_gem_object_unpin_fence(obj); + i915_vma_unpin_fence(vma); if (entry->flags & __EXEC_OBJECT_HAS_PIN) __i915_vma_unpin(vma); @@ -271,13 +275,19 @@ static void eb_destroy(struct eb_vmas *eb) exec_list); list_del_init(&vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); - i915_gem_object_put(vma->obj); + i915_vma_put(vma); } kfree(eb); } static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { + if (!i915_gem_object_has_struct_page(obj)) + return false; + + if (DBG_USE_CPU_RELOC) + return DBG_USE_CPU_RELOC > 0; + return (HAS_LLC(obj->base.dev) || obj->base.write_domain == I915_GEM_DOMAIN_CPU || obj->cache_level != I915_CACHE_NONE); @@ -302,137 +312,243 @@ static inline uint64_t gen8_noncanonical_addr(uint64_t address) } static inline uint64_t -relocation_target(struct drm_i915_gem_relocation_entry *reloc, +relocation_target(const struct drm_i915_gem_relocation_entry *reloc, uint64_t target_offset) { return gen8_canonical_addr((int)reloc->delta + target_offset); } -static int -relocate_entry_cpu(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +struct reloc_cache { + struct drm_i915_private *i915; + struct drm_mm_node node; + unsigned long vaddr; + unsigned int page; + bool use_64bit_reloc; +}; + +static void reloc_cache_init(struct reloc_cache *cache, + struct drm_i915_private *i915) { - struct drm_device *dev = obj->base.dev; - uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = relocation_target(reloc, target_offset); - char *vaddr; - int ret; + cache->page = -1; + cache->vaddr = 0; + cache->i915 = i915; + cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8; + cache->node.allocated = false; +} - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) - return ret; +static inline void 
*unmask_page(unsigned long p) +{ + return (void *)(uintptr_t)(p & PAGE_MASK); +} - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - reloc->offset >> PAGE_SHIFT)); - *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); +static inline unsigned int unmask_flags(unsigned long p) +{ + return p & ~PAGE_MASK; +} + +#define KMAP 0x4 /* after CLFLUSH_FLAGS */ + +static void reloc_cache_fini(struct reloc_cache *cache) +{ + void *vaddr; - if (INTEL_INFO(dev)->gen >= 8) { - page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + if (!cache->vaddr) + return; + + vaddr = unmask_page(cache->vaddr); + if (cache->vaddr & KMAP) { + if (cache->vaddr & CLFLUSH_AFTER) + mb(); - if (page_offset == 0) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + kunmap_atomic(vaddr); + i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm); + } else { + wmb(); + io_mapping_unmap_atomic((void __iomem *)vaddr); + if (cache->node.allocated) { + struct i915_ggtt *ggtt = &cache->i915->ggtt; + + ggtt->base.clear_range(&ggtt->base, + cache->node.start, + cache->node.size, + true); + drm_mm_remove_node(&cache->node); + } else { + i915_vma_unpin((struct i915_vma *)cache->node.mm); } + } +} + +static void *reloc_kmap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) +{ + void *vaddr; + + if (cache->vaddr) { + kunmap_atomic(unmask_page(cache->vaddr)); + } else { + unsigned int flushes; + int ret; - *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta); + ret = i915_gem_obj_prepare_shmem_write(obj, &flushes); + if (ret) + return ERR_PTR(ret); + + BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); + BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); + + cache->vaddr = flushes | KMAP; + cache->node.mm = (void *)obj; + if (flushes) + mb(); } - kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page)); + cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; + cache->page = page; - return 0; + return vaddr; } -static int -relocate_entry_gtt(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +static void *reloc_iomap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - uint64_t delta = relocation_target(reloc, target_offset); - uint64_t offset; - void __iomem *reloc_page; - int ret; + struct i915_ggtt *ggtt = &cache->i915->ggtt; + unsigned long offset; + void *vaddr; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; + if (cache->node.allocated) { + wmb(); + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, page), + cache->node.start, I915_CACHE_NONE, 0); + cache->page = page; + return unmask_page(cache->vaddr); + } - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; + if (cache->vaddr) { + io_mapping_unmap_atomic(unmask_page(cache->vaddr)); + } else { + struct i915_vma *vma; + int ret; - /* Map the page containing the relocation we're going to perform. 
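[Editor's aside] The reloc_cache introduced above amortises map/unmap costs when consecutive relocations land in the same page: the lookup below (reloc_vaddr()) reuses the current mapping whenever the page index matches and only remaps on a miss. The caching skeleton, with malloc standing in for kmap_atomic()/io_mapping_map_atomic_wc():

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_cache {
        long page;   /* page currently mapped, -1 if none */
        char *vaddr;
    };

    static char *toy_map(struct toy_cache *c, long page)
    {
        if (c->page == page)
            return c->vaddr;         /* hit: no remap needed */

        free(c->vaddr);              /* kernel: kunmap_atomic() */
        c->vaddr = calloc(1, 4096);  /* kernel: kmap_atomic(page) */
        c->page = page;
        printf("mapped page %ld\n", page);
        return c->vaddr;
    }

    int main(void)
    {
        struct toy_cache c = { .page = -1 };

        toy_map(&c, 3);
        toy_map(&c, 3); /* cached: silent */
        toy_map(&c, 4);
        free(c.vaddr);
        return 0;
    }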
*/ - offset = i915_gem_obj_ggtt_offset(obj); - offset += reloc->offset; - reloc_page = io_mapping_map_atomic_wc(ggtt->mappable, - offset & PAGE_MASK); - iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); - - if (INTEL_INFO(dev)->gen >= 8) { - offset += sizeof(uint32_t); - - if (offset_in_page(offset) == 0) { - io_mapping_unmap_atomic(reloc_page); - reloc_page = - io_mapping_map_atomic_wc(ggtt->mappable, - offset); + if (use_cpu_reloc(obj)) + return NULL; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ERR_PTR(ret); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | PIN_NONBLOCK); + if (IS_ERR(vma)) { + memset(&cache->node, 0, sizeof(cache->node)); + ret = drm_mm_insert_node_in_range_generic + (&ggtt->base.mm, &cache->node, + 4096, 0, 0, + 0, ggtt->mappable_end, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + if (ret) + return ERR_PTR(ret); + } else { + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + return ERR_PTR(ret); + } + + cache->node.start = vma->node.start; + cache->node.mm = (void *)vma; } + } - iowrite32(upper_32_bits(delta), - reloc_page + offset_in_page(offset)); + offset = cache->node.start; + if (cache->node.allocated) { + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, page), + offset, I915_CACHE_NONE, 0); + } else { + offset += page << PAGE_SHIFT; } - io_mapping_unmap_atomic(reloc_page); + vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset); + cache->page = page; + cache->vaddr = (unsigned long)vaddr; - return 0; + return vaddr; } -static void -clflush_write32(void *addr, uint32_t value) +static void *reloc_vaddr(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) { - /* This is not a fast path, so KISS. */ - drm_clflush_virt_range(addr, sizeof(uint32_t)); - *(uint32_t *)addr = value; - drm_clflush_virt_range(addr, sizeof(uint32_t)); + void *vaddr; + + if (cache->page == page) { + vaddr = unmask_page(cache->vaddr); + } else { + vaddr = NULL; + if ((cache->vaddr & KMAP) == 0) + vaddr = reloc_iomap(obj, cache, page); + if (!vaddr) + vaddr = reloc_kmap(obj, cache, page); + } + + return vaddr; } -static int -relocate_entry_clflush(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) { - struct drm_device *dev = obj->base.dev; - uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = relocation_target(reloc, target_offset); - char *vaddr; - int ret; + if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { + if (flushes & CLFLUSH_BEFORE) { + clflushopt(addr); + mb(); + } - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; + *addr = value; - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - reloc->offset >> PAGE_SHIFT)); - clflush_write32(vaddr + page_offset, lower_32_bits(delta)); + /* Writes to the same cacheline are serialised by the CPU + * (including clflush). On the write path, we only require + * that it hits memory in an orderly fashion and place + * mb barriers at the start and end of the relocation phase + * to ensure ordering of clflush wrt to the system. 
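[Editor's aside] On gen8+ a relocation is 64 bits wide, and relocate_entry() just below writes it as two 32-bit halves: advance the offset by four bytes, shift the target down, and go around again, so a second half that crosses into the next page simply re-runs the page-mapping step. The arithmetic in isolation, with a made-up target address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t target = 0x0000000123456789ull; /* hypothetical address */
        uint32_t mem[2] = { 0, 0 };              /* stands in for the bo */
        uint64_t offset = 0;
        int wide = 1;                            /* gen8+: 64-bit relocs */

    repeat:
        mem[offset / sizeof(uint32_t)] = (uint32_t)target; /* lower 32 bits */
        if (wide) {
            offset += sizeof(uint32_t);
            target >>= 32;                       /* upper half next pass */
            wide = 0;
            goto repeat;
        }
        printf("lo=%#x hi=%#x\n", mem[0], mem[1]);
        return 0;
    }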
+ */ + if (flushes & CLFLUSH_AFTER) + clflushopt(addr); + } else + *addr = value; +} + +static int +relocate_entry(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_relocation_entry *reloc, + struct reloc_cache *cache, + u64 target_offset) +{ + u64 offset = reloc->offset; + bool wide = cache->use_64bit_reloc; + void *vaddr; - if (INTEL_INFO(dev)->gen >= 8) { - page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + target_offset = relocation_target(reloc, target_offset); +repeat: + vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); - if (page_offset == 0) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); - } + clflush_write32(vaddr + offset_in_page(offset), + lower_32_bits(target_offset), + cache->vaddr); - clflush_write32(vaddr + page_offset, upper_32_bits(delta)); + if (wide) { + offset += sizeof(u32); + target_offset >>= 32; + wide = false; + goto repeat; } - kunmap_atomic(vaddr); - return 0; } @@ -453,7 +569,8 @@ static bool object_is_idle(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, - struct drm_i915_gem_relocation_entry *reloc) + struct drm_i915_gem_relocation_entry *reloc, + struct reloc_cache *cache) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; @@ -516,7 +633,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, /* Check that the relocation address is valid... */ if (unlikely(reloc->offset > - obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) { + obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) { DRM_DEBUG("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, @@ -536,23 +653,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (pagefault_disabled() && !object_is_idle(obj)) return -EFAULT; - if (use_cpu_reloc(obj)) - ret = relocate_entry_cpu(obj, reloc, target_offset); - else if (obj->map_and_fenceable) - ret = relocate_entry_gtt(obj, reloc, target_offset); - else if (static_cpu_has(X86_FEATURE_CLFLUSH)) - ret = relocate_entry_clflush(obj, reloc, target_offset); - else { - WARN_ONCE(1, "Impossible case in relocation handling\n"); - ret = -ENODEV; - } - + ret = relocate_entry(obj, reloc, cache, target_offset); if (ret) return ret; /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; - return 0; } @@ -564,9 +670,11 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry __user *user_relocs; struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - int remain, ret; + struct reloc_cache cache; + int remain, ret = 0; user_relocs = u64_to_user_ptr(entry->relocs_ptr); + reloc_cache_init(&cache, eb->i915); remain = entry->relocation_count; while (remain) { @@ -576,19 +684,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, count = ARRAY_SIZE(stack_reloc); remain -= count; - if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) - return -EFAULT; + if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) { + ret = -EFAULT; + goto out; + } do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); if (ret) - return ret; + goto out; if 
(r->presumed_offset != offset && - __put_user(r->presumed_offset, &user_relocs->presumed_offset)) { - return -EFAULT; + __put_user(r->presumed_offset, + &user_relocs->presumed_offset)) { + ret = -EFAULT; + goto out; } user_relocs++; @@ -596,7 +708,9 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, } while (--count); } - return 0; +out: + reloc_cache_fini(&cache); + return ret; #undef N_RELOC } @@ -606,15 +720,18 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, struct drm_i915_gem_relocation_entry *relocs) { const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - int i, ret; + struct reloc_cache cache; + int i, ret = 0; + reloc_cache_init(&cache, eb->i915); for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); if (ret) - return ret; + break; } + reloc_cache_fini(&cache); - return 0; + return ret; } static int @@ -693,11 +810,11 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, entry->flags |= __EXEC_OBJECT_HAS_PIN; if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - ret = i915_gem_object_get_fence(obj); + ret = i915_vma_get_fence(vma); if (ret) return ret; - if (i915_gem_object_pin_fence(obj)) + if (i915_vma_pin_fence(vma)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; } @@ -739,7 +856,6 @@ static bool eb_vma_misplaced(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - struct drm_i915_gem_object *obj = vma->obj; WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !i915_vma_is_ggtt(vma)); @@ -760,7 +876,8 @@ eb_vma_misplaced(struct i915_vma *vma) return true; /* avoid costly ping-pong once a batch bo ended up non-mappable */ - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && + !i915_vma_is_map_and_fenceable(vma)) return !only_mappable_for_reloc(entry->flags); if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && @@ -900,7 +1017,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); list_del_init(&vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); - i915_gem_object_put(vma->obj); + i915_vma_put(vma); } mutex_unlock(&dev->struct_mutex); @@ -1010,8 +1127,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, { const unsigned int other_rings = eb_other_engines(req); struct i915_vma *vma; - uint32_t flush_domains = 0; - bool flush_chipset = false; int ret; list_for_each_entry(vma, vmas, exec_list) { @@ -1024,16 +1139,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - flush_chipset |= i915_gem_clflush_object(obj, false); - - flush_domains |= obj->base.write_domain; + i915_gem_clflush_object(obj, false); } - if (flush_chipset) - i915_gem_chipset_flush(req->engine->i915); - - if (flush_domains & I915_GEM_DOMAIN_GTT) - wmb(); + /* Unconditionally flush any chipset caches (for streaming writes). */ + i915_gem_chipset_flush(req->engine->i915); /* Unconditionally invalidate GPU caches and TLBs. 
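The relocate_entry() rewrite above folds the three old paths (CPU, GTT, clflush) into one writer that handles a 64-bit relocation as two 32-bit writes, re-fetching the mapping when the upper dword spills onto the next page. A standalone sketch of that split; plain arrays stand in for the object's pages, and this reloc_vaddr() is a hypothetical stand-in for the cached-mapping lookup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

/* Stand-in for the object's backing store: two pages of "batch". */
static uint8_t pages[2][PAGE_SIZE];

/* Hypothetical stand-in for reloc_vaddr(): the kernel would return a
 * cached kmap/iomap of the requested page here. */
static void *reloc_vaddr(unsigned int page)
{
    return pages[page];
}

static void relocate_entry(uint64_t offset, uint64_t target_offset, bool wide)
{
repeat:
    {
        uint8_t *vaddr = reloc_vaddr(offset >> PAGE_SHIFT);
        uint32_t lo = (uint32_t)target_offset;

        /* In the kernel this store goes through clflush_write32(). */
        memcpy(vaddr + (offset & (PAGE_SIZE - 1)), &lo, sizeof(lo));
    }
    if (wide) {
        /* The upper dword may land on the next page; looping back
         * through reloc_vaddr() refetches the mapping if it does. */
        offset += sizeof(uint32_t);
        target_offset >>= 32;
        wide = false;
        goto repeat;
    }
}

int main(void)
{
    uint32_t lo, hi;

    /* A 64-bit relocation straddling the first page boundary. */
    relocate_entry(PAGE_SIZE - 4, 0x1122334455667788ull, true);

    memcpy(&lo, &pages[0][PAGE_SIZE - 4], 4);
    memcpy(&hi, &pages[1][0], 4);
    printf("lo %#x hi %#x\n", lo, hi); /* lo 0x55667788, hi 0x11223344 */
    return 0;
}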
*/ return req->engine->emit_flush(req, EMIT_INVALIDATE); @@ -1194,15 +1304,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; } - if (flags & EXEC_OBJECT_NEEDS_FENCE) { - i915_gem_active_set(&obj->last_fence, req); - if (flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = req->i915; - - list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, - &dev_priv->mm.fence_list); - } - } + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_gem_active_set(&vma->last_fence, req); i915_vma_set_active(vma, idx); i915_gem_active_set(&vma->last_read[idx], req); @@ -1281,7 +1384,7 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) return 0; } -static struct i915_vma* +static struct i915_vma * i915_gem_execbuffer_parse(struct intel_engine_cs *engine, struct drm_i915_gem_exec_object2 *shadow_exec_entry, struct drm_i915_gem_object *batch_obj, @@ -1305,31 +1408,28 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine, batch_start_offset, batch_len, is_master); - if (ret) - goto err; - - ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); - if (ret) - goto err; + if (ret) { + if (ret == -EACCES) /* unhandled chained batch */ + vma = NULL; + else + vma = ERR_PTR(ret); + goto out; + } - i915_gem_object_unpin_pages(shadow_batch_obj); + vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) + goto out; memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); - vma = i915_gem_obj_to_ggtt(shadow_batch_obj); vma->exec_entry = shadow_exec_entry; vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; i915_gem_object_get(shadow_batch_obj); list_add_tail(&vma->exec_list, &eb->vmas); - return vma; - -err: +out: i915_gem_object_unpin_pages(shadow_batch_obj); - if (ret == -EACCES) /* unhandled chained batch */ - return NULL; - else - return ERR_PTR(ret); + return vma; } static int @@ -1412,7 +1512,7 @@ execbuf_submit(struct i915_execbuffer_params *params, params->args_batch_start_offset; if (exec_len == 0) - exec_len = params->batch->size; + exec_len = params->batch->size - params->args_batch_start_offset; ret = params->engine->emit_bb_start(params->request, exec_start, exec_len, @@ -1595,7 +1695,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, memset(¶ms_master, 0x00, sizeof(params_master)); - eb = eb_create(args); + eb = eb_create(dev_priv, args); if (eb == NULL) { i915_gem_context_put(ctx); mutex_unlock(&dev->struct_mutex); @@ -1638,6 +1738,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = -EINVAL; goto err; } + if (args->batch_start_offset > params->batch->size || + args->batch_len > params->batch->size - args->batch_start_offset) { + DRM_DEBUG("Attempting to use out-of-bounds batch\n"); + ret = -EINVAL; + goto err; + } params->args_batch_start_offset = args->batch_start_offset; if (intel_engine_needs_cmd_parser(engine) && args->batch_len) { @@ -1677,6 +1783,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * hsw should have this fixed, but bdw mucks it up again. */ if (dispatch_flags & I915_DISPATCH_SECURE) { struct drm_i915_gem_object *obj = params->batch->obj; + struct i915_vma *vma; /* * So on first glance it looks freaky that we pin the batch here @@ -1688,11 +1795,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * fitting due to fragmentation. * So this is actually safe. 
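The new out-of-bounds batch check is deliberately written in overflow-safe form: it subtracts on the right-hand side instead of summing two user-controlled values on the left. A small demonstration of why that shape is preferred:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-safe: both comparisons stay within [0, size]. Writing it
 * as "start + len > size" would wrap around for large start + len and
 * wrongly accept an out-of-bounds batch. */
static bool batch_in_bounds(uint64_t start, uint64_t len, uint64_t size)
{
    return start <= size && len <= size - start;
}

int main(void)
{
    printf("%d\n", batch_in_bounds(0, 4096, 4096));                /* 1 */
    printf("%d\n", batch_in_bounds(4096, 1, 4096));                /* 0 */
    printf("%d\n", batch_in_bounds(UINT64_MAX, UINT64_MAX, 4096)); /* 0, naive form wraps */
    return 0;
}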
*/ - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (ret) + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto err; + } - params->batch = i915_gem_obj_to_ggtt(obj); + params->batch = vma; } /* Allocate a request for this batch buffer nice and early. */ @@ -1702,6 +1811,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err_batch_unpin; } + /* Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when this + * request is retired will the batch_obj be moved onto the + * inactive_list and lose its active reference. Hence we do not need + * to explicitly hold another reference here. + */ + params->request->batch = params->batch; + ret = i915_gem_request_add_to_client(params->request, file); if (ret) goto err_request; @@ -1720,7 +1837,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = execbuf_submit(params, args, &eb->vmas); err_request: - __i915_add_request(params->request, params->batch->obj, ret == 0); + __i915_add_request(params->request, ret == 0); err_batch_unpin: /* diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 9e8173f..8df1fa7 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -55,87 +55,85 @@ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed. */ -static void i965_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +#define pipelined 0 + +static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; + u64 val; - if (INTEL_INFO(dev)->gen >= 6) { - fence_reg_lo = FENCE_REG_GEN6_LO(reg); - fence_reg_hi = FENCE_REG_GEN6_HI(reg); + if (INTEL_INFO(fence->i915)->gen >= 6) { + fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); + fence_reg_hi = FENCE_REG_GEN6_HI(fence->id); fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT; + } else { - fence_reg_lo = FENCE_REG_965_LO(reg); - fence_reg_hi = FENCE_REG_965_HI(reg); + fence_reg_lo = FENCE_REG_965_LO(fence->id); + fence_reg_hi = FENCE_REG_965_HI(fence->id); fence_pitch_shift = I965_FENCE_PITCH_SHIFT; } - /* To w/a incoherency with non-atomic 64-bit register updates, - * we split the 64-bit update into two 32-bit writes. In order - * for a partial fence not to be evaluated between writes, we - * precede the update with write to turn off the fence register, - * and only enable the fence as the last step. - * - * For extra levels of paranoia, we make sure each step lands - * before applying the next step. - */ - I915_WRITE(fence_reg_lo, 0); - POSTING_READ(fence_reg_lo); - - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); - unsigned int tiling = i915_gem_object_get_tiling(obj); - unsigned int stride = i915_gem_object_get_stride(obj); - uint64_t val; - - /* Adjust fence size to match tiled area */ - if (tiling != I915_TILING_NONE) { - uint32_t row_size = stride * - (tiling == I915_TILING_Y ?
32 : 8); - size = (size / row_size) * row_size; - } - - val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & - 0xfffff000) << 32; - val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; - val |= (uint64_t)((stride / 128) - 1) << fence_pitch_shift; - if (tiling == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); + u32 row_size = stride * (is_y_tiled ? 32 : 8); + u32 size = rounddown((u32)vma->node.size, row_size); + + val = ((vma->node.start + size - 4096) & 0xfffff000) << 32; + val |= vma->node.start & 0xfffff000; + val |= (u64)((stride / 128) - 1) << fence_pitch_shift; + if (is_y_tiled) + val |= BIT(I965_FENCE_TILING_Y_SHIFT); val |= I965_FENCE_REG_VALID; + } - I915_WRITE(fence_reg_hi, val >> 32); - POSTING_READ(fence_reg_hi); + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; - I915_WRITE(fence_reg_lo, val); + /* To w/a incoherency with non-atomic 64-bit register updates, + * we split the 64-bit update into two 32-bit writes. In order + * for a partial fence not to be evaluated between writes, we + * precede the update with write to turn off the fence register, + * and only enable the fence as the last step. + * + * For extra levels of paranoia, we make sure each step lands + * before applying the next step. + */ + I915_WRITE(fence_reg_lo, 0); + POSTING_READ(fence_reg_lo); + + I915_WRITE(fence_reg_hi, upper_32_bits(val)); + I915_WRITE(fence_reg_lo, lower_32_bits(val)); POSTING_READ(fence_reg_lo); - } else { - I915_WRITE(fence_reg_hi, 0); - POSTING_READ(fence_reg_hi); } } -static void i915_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void i915_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 val; - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); - unsigned int tiling = i915_gem_object_get_tiling(obj); - unsigned int stride = i915_gem_object_get_stride(obj); + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); int pitch_val; int tile_width; - WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || - (size & -size) != size || - (i915_gem_obj_ggtt_offset(obj) & (size - 1)), - "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); + WARN((vma->node.start & ~I915_FENCE_START_MASK) || + !is_power_of_2(vma->node.size) || + (vma->node.start & (vma->node.size - 1)), + "object 0x%08llx [fenceable? 
%d] not 1M or pot-size (0x%08llx) aligned\n", + vma->node.start, + i915_vma_is_map_and_fenceable(vma), + vma->node.size); - if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915)) tile_width = 128; else tile_width = 512; @@ -144,139 +142,141 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, pitch_val = stride / tile_width; pitch_val = ffs(pitch_val) - 1; - val = i915_gem_obj_ggtt_offset(obj); - if (tiling == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(size); + val = vma->node.start; + if (is_y_tiled) + val |= BIT(I830_FENCE_TILING_Y_SHIFT); + val |= I915_FENCE_SIZE_BITS(vma->node.size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - } else - val = 0; + } + + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; + i915_reg_t reg = FENCE_REG(fence->id); - I915_WRITE(FENCE_REG(reg), val); - POSTING_READ(FENCE_REG(reg)); + I915_WRITE(reg, val); + POSTING_READ(reg); + } } -static void i830_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void i830_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t val; + u32 val; - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); - unsigned int tiling = i915_gem_object_get_tiling(obj); - unsigned int stride = i915_gem_object_get_stride(obj); - uint32_t pitch_val; + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); + u32 pitch_val; - WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || - (size & -size) != size || - (i915_gem_obj_ggtt_offset(obj) & (size - 1)), - "object 0x%08llx not 512K or pot-size 0x%08x aligned\n", - i915_gem_obj_ggtt_offset(obj), size); + WARN((vma->node.start & ~I830_FENCE_START_MASK) || + !is_power_of_2(vma->node.size) || + (vma->node.start & (vma->node.size - 1)), + "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n", + vma->node.start, vma->node.size); pitch_val = stride / 128; pitch_val = ffs(pitch_val) - 1; - val = i915_gem_obj_ggtt_offset(obj); - if (tiling == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I830_FENCE_SIZE_BITS(size); + val = vma->node.start; + if (is_y_tiled) + val |= BIT(I830_FENCE_TILING_Y_SHIFT); + val |= I830_FENCE_SIZE_BITS(vma->node.size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - } else - val = 0; + } - I915_WRITE(FENCE_REG(reg), val); - POSTING_READ(FENCE_REG(reg)); -} + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; + i915_reg_t reg = FENCE_REG(fence->id); -inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) -{ - return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; + I915_WRITE(reg, val); + POSTING_READ(reg); + } } -static void i915_gem_write_fence(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void fence_write(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); - - /* Ensure that all CPU reads are completed before installing a fence - * and all writes before removing the fence. + /* Previous access through the fence register is marshalled by + * the mb() inside the fault handlers (i915_gem_release_mmaps) + * and explicitly managed for internal users. 
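For reference, the value that i965_write_fence_reg() now assembles from vma->node packs the 4K-aligned end and start addresses, the pitch in 128-byte units minus one, a Y-tiling bit, and a valid bit. A sketch of the gen6 encoding; the shift and bit positions are taken to match i915_reg.h and should be treated as assumptions:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions as in i915_reg.h (assumed here, not derived). */
#define GEN6_FENCE_PITCH_SHIFT    32
#define I965_FENCE_TILING_Y_SHIFT 1
#define I965_FENCE_REG_VALID      (1ull << 0)

static uint64_t gen6_fence_val(uint64_t start, uint64_t node_size,
                               unsigned int stride, bool is_y_tiled)
{
    /* Fence coverage must be a whole number of tile rows, hence the
     * rounddown() in the patch. */
    uint32_t row_size = stride * (is_y_tiled ? 32 : 8);
    uint32_t size = (uint32_t)node_size / row_size * row_size;
    uint64_t val;

    val = ((start + size - 4096) & 0xfffff000) << 32;
    val |= start & 0xfffff000;
    val |= (uint64_t)(stride / 128 - 1) << GEN6_FENCE_PITCH_SHIFT;
    if (is_y_tiled)
        val |= 1ull << I965_FENCE_TILING_Y_SHIFT;
    val |= I965_FENCE_REG_VALID;

    return val;
}

int main(void)
{
    /* 1MiB X-tiled region at GGTT offset 0x100000, 512-byte stride. */
    printf("0x%016" PRIx64 "\n",
           gen6_fence_val(0x100000, 1 << 20, 512, false));
    /* prints 0x001ff00300100001 */
    return 0;
}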
*/ - if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) - mb(); - - WARN(obj && - (!i915_gem_object_get_stride(obj) || - !i915_gem_object_get_tiling(obj)), - "bogus fence setup with stride: 0x%x, tiling mode: %i\n", - i915_gem_object_get_stride(obj), - i915_gem_object_get_tiling(obj)); - - if (IS_GEN2(dev)) - i830_write_fence_reg(dev, reg, obj); - else if (IS_GEN3(dev)) - i915_write_fence_reg(dev, reg, obj); - else if (INTEL_INFO(dev)->gen >= 4) - i965_write_fence_reg(dev, reg, obj); - - /* And similarly be paranoid that no direct access to this region - * is reordered to before the fence is installed. + + if (IS_GEN2(fence->i915)) + i830_write_fence_reg(fence, vma); + else if (IS_GEN3(fence->i915)) + i915_write_fence_reg(fence, vma); + else + i965_write_fence_reg(fence, vma); + + /* Access through the fenced region afterwards is + * ordered by the posting reads whilst writing the registers. */ - if (i915_gem_object_needs_mb(obj)) - mb(); -} -static inline int fence_number(struct drm_i915_private *dev_priv, - struct drm_i915_fence_reg *fence) -{ - return fence - dev_priv->fence_regs; + fence->dirty = false; } -static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, - struct drm_i915_fence_reg *fence, - bool enable) +static int fence_update(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - int reg = fence_number(dev_priv, fence); + int ret; - i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); + if (vma) { + if (!i915_vma_is_map_and_fenceable(vma)) + return -EINVAL; - if (enable) { - obj->fence_reg = reg; - fence->obj = obj; - list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); - } else { - obj->fence_reg = I915_FENCE_REG_NONE; - fence->obj = NULL; - list_del_init(&fence->lru_list); + if (WARN(!i915_gem_object_get_stride(vma->obj) || + !i915_gem_object_get_tiling(vma->obj), + "bogus fence setup with stride: 0x%x, tiling mode: %i\n", + i915_gem_object_get_stride(vma->obj), + i915_gem_object_get_tiling(vma->obj))) + return -EINVAL; + + ret = i915_gem_active_retire(&vma->last_fence, + &vma->obj->base.dev->struct_mutex); + if (ret) + return ret; } - obj->fence_dirty = false; -} -static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) -{ - if (i915_gem_object_is_tiled(obj)) - i915_gem_release_mmap(obj); + if (fence->vma) { + ret = i915_gem_active_retire(&fence->vma->last_fence, + &fence->vma->obj->base.dev->struct_mutex); + if (ret) + return ret; + } - /* As we do not have an associated fence register, we will force - * a tiling change if we ever need to acquire one. - */ - obj->fence_dirty = false; - obj->fence_reg = I915_FENCE_REG_NONE; -} + if (fence->vma && fence->vma != vma) { + /* Ensure that all userspace CPU access is completed before + * stealing the fence. 
+ */ + i915_gem_release_mmap(fence->vma->obj); -static int -i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) -{ - return i915_gem_active_retire(&obj->last_fence, - &obj->base.dev->struct_mutex); + fence->vma->fence = NULL; + fence->vma = NULL; + + list_move(&fence->link, &fence->i915->mm.fence_list); + } + + fence_write(fence, vma); + + if (vma) { + if (fence->vma != vma) { + vma->fence = fence; + fence->vma = vma; + } + + list_move_tail(&fence->link, &fence->i915->mm.fence_list); + } + + return 0; } /** - * i915_gem_object_put_fence - force-remove fence for an object - * @obj: object to map through a fence reg + * i915_vma_put_fence - force-remove fence for a VMA + * @vma: vma to map linearly (not through a fence reg) * * This function force-removes any fence from the given object, which is useful * if the kernel wants to do untiled GTT access. @@ -286,70 +286,40 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) * 0 on success, negative error code on failure. */ int -i915_gem_object_put_fence(struct drm_i915_gem_object *obj) +i915_vma_put_fence(struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct drm_i915_fence_reg *fence; - int ret; - - ret = i915_gem_object_wait_fence(obj); - if (ret) - return ret; + struct drm_i915_fence_reg *fence = vma->fence; - if (obj->fence_reg == I915_FENCE_REG_NONE) + if (!fence) return 0; - fence = &dev_priv->fence_regs[obj->fence_reg]; - - if (WARN_ON(fence->pin_count)) + if (fence->pin_count) return -EBUSY; - i915_gem_object_fence_lost(obj); - i915_gem_object_update_fence(obj, fence, false); - - return 0; + return fence_update(fence, NULL); } -static struct drm_i915_fence_reg * -i915_find_fence_reg(struct drm_device *dev) +static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_fence_reg *reg, *avail; - int i; - - /* First try to find a free reg */ - avail = NULL; - for (i = 0; i < dev_priv->num_fence_regs; i++) { - reg = &dev_priv->fence_regs[i]; - if (!reg->obj) - return reg; - - if (!reg->pin_count) - avail = reg; - } - - if (avail == NULL) - goto deadlock; + struct drm_i915_fence_reg *fence; - /* None available, try to steal one or wait for a user to finish */ - list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { - if (reg->pin_count) + list_for_each_entry(fence, &dev_priv->mm.fence_list, link) { + if (fence->pin_count) continue; - return reg; + return fence; } -deadlock: /* Wait for completion of pending flips which consume fences */ - if (intel_has_pending_fb_unpin(dev)) + if (intel_has_pending_fb_unpin(&dev_priv->drm)) return ERR_PTR(-EAGAIN); return ERR_PTR(-EDEADLK); } /** - * i915_gem_object_get_fence - set up fencing for an object - * @obj: object to map through a fence reg + * i915_vma_get_fence - set up fencing for a vma + * @vma: vma to map through a fence reg * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -366,103 +336,27 @@ deadlock: * 0 on success, negative error code on failure. 
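fence_find() is now a single LRU walk: the first unpinned register in LRU order wins, and exhaustion is reported to the caller rather than asserted. A toy version of that selection, using an array plus an explicit LRU stamp in place of the kernel's list_move bookkeeping:

#include <stddef.h>
#include <stdio.h>

struct fence {
    int pin_count;
    unsigned long lru; /* lower == least recently used */
};

/* First unpinned fence in LRU order, like fence_find(); returns NULL
 * where the kernel distinguishes -EAGAIN (pending flip may unpin one)
 * from -EDEADLK (nothing will ever come free). */
static struct fence *fence_find(struct fence *fences, size_t n)
{
    struct fence *best = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        if (fences[i].pin_count)
            continue;
        if (!best || fences[i].lru < best->lru)
            best = &fences[i];
    }
    return best;
}

int main(void)
{
    struct fence fences[] = {
        { .pin_count = 1, .lru = 0 },
        { .pin_count = 0, .lru = 5 },
        { .pin_count = 0, .lru = 2 },
    };

    printf("stole fence %td\n",
           fence_find(fences, sizeof(fences) / sizeof(fences[0])) - fences);
    /* prints "stole fence 2": unpinned and least recently used */
    return 0;
}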
*/ int -i915_gem_object_get_fence(struct drm_i915_gem_object *obj) +i915_vma_get_fence(struct i915_vma *vma) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - bool enable = i915_gem_object_is_tiled(obj); - struct drm_i915_fence_reg *reg; - int ret; - - /* Have we updated the tiling parameters upon the object and so - * will need to serialise the write to the associated fence register? - */ - if (obj->fence_dirty) { - ret = i915_gem_object_wait_fence(obj); - if (ret) - return ret; - } + struct drm_i915_fence_reg *fence; + struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; /* Just update our place in the LRU if our fence is getting reused. */ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - reg = &dev_priv->fence_regs[obj->fence_reg]; - if (!obj->fence_dirty) { - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); + if (vma->fence) { + fence = vma->fence; + if (!fence->dirty) { + list_move_tail(&fence->link, + &fence->i915->mm.fence_list); return 0; } - } else if (enable) { - if (WARN_ON(!obj->map_and_fenceable)) - return -EINVAL; - - reg = i915_find_fence_reg(dev); - if (IS_ERR(reg)) - return PTR_ERR(reg); - - if (reg->obj) { - struct drm_i915_gem_object *old = reg->obj; - - ret = i915_gem_object_wait_fence(old); - if (ret) - return ret; - - i915_gem_object_fence_lost(old); - } + } else if (set) { + fence = fence_find(to_i915(vma->vm->dev)); + if (IS_ERR(fence)) + return PTR_ERR(fence); } else return 0; - i915_gem_object_update_fence(obj, reg, enable); - - return 0; -} - -/** - * i915_gem_object_pin_fence - pin fencing state - * @obj: object to pin fencing for - * - * This pins the fencing state (whether tiled or untiled) to make sure the - * object is ready to be used as a scanout target. Fencing status must be - * synchronize first by calling i915_gem_object_get_fence(): - * - * The resulting fence pin reference must be released again with - * i915_gem_object_unpin_fence(). - * - * Returns: - * - * True if the object has a fence, false otherwise. - */ -bool -i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); - - WARN_ON(!ggtt_vma || - dev_priv->fence_regs[obj->fence_reg].pin_count > - i915_vma_pin_count(ggtt_vma)); - dev_priv->fence_regs[obj->fence_reg].pin_count++; - return true; - } else - return false; -} - -/** - * i915_gem_object_unpin_fence - unpin fencing state - * @obj: object to unpin fencing for - * - * This releases the fence pin reference acquired through - * i915_gem_object_pin_fence. It will handle both objects with and without an - * attached fence correctly, callers do not need to distinguish this. - */ -void -i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); - dev_priv->fence_regs[obj->fence_reg].pin_count--; - } + return fence_update(fence, set); } /** @@ -479,17 +373,16 @@ void i915_gem_restore_fences(struct drm_device *dev) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + struct i915_vma *vma = reg->vma; /* * Commit delayed tiling changes if we have an object still * attached to the fence, otherwise just clear the fence. 
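i915_vma_get_fence() above reduces to a three-way decision: reuse a clean fence (LRU bump only), rewrite a dirty one, or steal a register only when tiled access actually needs it. A compact sketch of just that control flow, with illustrative struct shapes:

#include <stdbool.h>
#include <stdio.h>

struct fence { bool dirty; };
struct vma { struct fence *fence; bool tiled; };

/* Mirrors the control flow of i915_vma_get_fence(): reuse a clean
 * fence, rewrite a dirty one, steal one only for tiled access, and
 * do nothing for untiled VMAs without a fence. */
static const char *get_fence_action(const struct vma *vma)
{
    if (vma->fence)
        return vma->fence->dirty ? "rewrite (fence_update)"
                                 : "reuse (LRU bump only)";
    if (vma->tiled)
        return "steal (fence_find + fence_update)";
    return "none needed";
}

int main(void)
{
    struct fence clean = { .dirty = false }, dirty = { .dirty = true };
    struct vma a = { &clean, true }, b = { &dirty, true }, c = { NULL, false };

    printf("%s\n%s\n%s\n", get_fence_action(&a),
           get_fence_action(&b), get_fence_action(&c));
    return 0;
}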
*/ - if (reg->obj) { - i915_gem_object_update_fence(reg->obj, reg, - i915_gem_object_get_tiling(reg->obj)); - } else { - i915_gem_write_fence(dev, i, NULL); - } + if (vma && !i915_gem_object_is_tiled(vma->obj)) + vma = NULL; + + fence_update(reg, vma); } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 18c7c96..b90fdce 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -170,11 +170,13 @@ static int ppgtt_bind_vma(struct i915_vma *vma, { u32 pte_flags = 0; + vma->pages = vma->obj->pages; + /* Currently applicable only to VLV */ if (vma->obj->gt_ro) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, + vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, cache_level, pte_flags); return 0; @@ -2618,8 +2620,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (obj->gt_ro) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, - vma->node.start, + vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, cache_level, pte_flags); /* @@ -2651,8 +2652,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_GLOBAL_BIND) { vma->vm->insert_entries(vma->vm, - vma->ggtt_view.pages, - vma->node.start, + vma->pages, vma->node.start, cache_level, pte_flags); } @@ -2660,8 +2660,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt; appgtt->base.insert_entries(&appgtt->base, - vma->ggtt_view.pages, - vma->node.start, + vma->pages, vma->node.start, cache_level, pte_flags); } @@ -2795,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - ppgtt->base.cleanup(&ppgtt->base); kfree(ppgtt); } @@ -2812,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt->base.cleanup(&ggtt->base); arch_phys_wc_del(ggtt->mtrr); - io_mapping_free(ggtt->mappable); + io_mapping_fini(&ggtt->mappable); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -3210,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) if (!HAS_LLC(dev_priv)) ggtt->base.mm.color_adjust = i915_gtt_color_adjust; - ggtt->mappable = - io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end); - if (!ggtt->mappable) { + if (!io_mapping_init_wc(&dev_priv->ggtt.mappable, + dev_priv->ggtt.mappable_base, + dev_priv->ggtt.mappable_end)) { ret = -EIO; goto out_gtt_cleanup; } @@ -3323,6 +3321,7 @@ void i915_vma_destroy(struct i915_vma *vma) GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(!i915_vma_is_closed(vma)); + GEM_BUG_ON(vma->fence); list_del(&vma->vm_link); if (!i915_vma_is_ggtt(vma)) @@ -3342,33 +3341,29 @@ void i915_vma_close(struct i915_vma *vma) } static struct i915_vma * -__i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) +__i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { struct i915_vma *vma; int i; GEM_BUG_ON(vm->closed); - if (WARN_ON(i915_is_ggtt(vm) != !!view)) - return ERR_PTR(-EINVAL); - vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); if (vma == NULL) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&vma->obj_link); INIT_LIST_HEAD(&vma->exec_list); for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) 
init_request_active(&vma->last_read[i], i915_vma_retire); + init_request_active(&vma->last_fence, NULL); list_add(&vma->vm_link, &vm->unbound_list); vma->vm = vm; vma->obj = obj; vma->size = obj->base.size; - if (i915_is_ggtt(vm)) { - vma->flags |= I915_VMA_GGTT; + if (view) { vma->ggtt_view = *view; if (view->type == I915_GGTT_VIEW_PARTIAL) { vma->size = view->params.partial.size; @@ -3378,46 +3373,79 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj, intel_rotation_info_size(&view->params.rotated); vma->size <<= PAGE_SHIFT; } + } + + if (i915_is_ggtt(vm)) { + vma->flags |= I915_VMA_GGTT; } else { i915_ppgtt_get(i915_vm_to_ppgtt(vm)); } list_add_tail(&vma->obj_link, &obj->vma_list); - return vma; } +static inline bool vma_matches(struct i915_vma *vma, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + if (vma->vm != vm) + return false; + + if (!i915_vma_is_ggtt(vma)) + return true; + + if (!view) + return vma->ggtt_view.type == 0; + + if (vma->ggtt_view.type != view->type) + return false; + + return memcmp(&vma->ggtt_view.params, + &view->params, + sizeof(view->params)) == 0; +} + struct i915_vma * -i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view)); + + return __i915_vma_create(obj, vm, view); +} + +struct i915_vma * +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { struct i915_vma *vma; - vma = i915_gem_obj_to_vma(obj, vm); - if (!vma) - vma = __i915_gem_vma_create(obj, vm, - i915_is_ggtt(vm) ? 
&i915_ggtt_view_normal : NULL); + list_for_each_entry_reverse(vma, &obj->vma_list, obj_link) + if (vma_matches(vma, vm, view)) + return vma; - return vma; + return NULL; } struct i915_vma * -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); + struct i915_vma *vma; - GEM_BUG_ON(!view); + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + vma = i915_gem_obj_to_vma(obj, vm, view); if (!vma) - vma = __i915_gem_vma_create(obj, &ggtt->base, view); + vma = __i915_vma_create(obj, vm, view); GEM_BUG_ON(i915_vma_is_closed(vma)); return vma; - } static struct scatterlist * @@ -3449,18 +3477,16 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, } static struct sg_table * -intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, +intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { const size_t n_pages = obj->base.size / PAGE_SIZE; - unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; - unsigned int size_pages_uv; + unsigned int size = intel_rotation_info_size(rot_info); struct sgt_iter sgt_iter; dma_addr_t dma_addr; unsigned long i; dma_addr_t *page_addr_list; struct sg_table *st; - unsigned int uv_start_page; struct scatterlist *sg; int ret = -ENOMEM; @@ -3471,18 +3497,12 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, if (!page_addr_list) return ERR_PTR(ret); - /* Account for UV plane with NV12. */ - if (rot_info->pixel_format == DRM_FORMAT_NV12) - size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height; - else - size_pages_uv = 0; - /* Allocate target SG list. */ st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) goto err_st_alloc; - ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL); + ret = sg_alloc_table(st, size, GFP_KERNEL); if (ret) goto err_sg_alloc; @@ -3495,32 +3515,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, st->nents = 0; sg = st->sgl; - /* Rotate the pages. */ - sg = rotate_pages(page_addr_list, 0, - rot_info->plane[0].width, rot_info->plane[0].height, - rot_info->plane[0].width, - st, sg); - - /* Append the UV plane if NV12. */ - if (rot_info->pixel_format == DRM_FORMAT_NV12) { - uv_start_page = size_pages; - - /* Check for tile-row un-alignment. 
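The open-coded lookup helpers give way to vma_matches() above, which treats a NULL view as the normal GGTT view and otherwise compares the view parameters bytewise. A self-contained model of the same predicate; the struct layout here is illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum view_type { VIEW_NORMAL = 0, VIEW_ROTATED, VIEW_PARTIAL };

struct ggtt_view {
    enum view_type type;
    struct { unsigned long start, size; } params;
};

struct vma {
    const void *vm;
    bool is_ggtt;
    struct ggtt_view view;
};

/* Mirrors vma_matches(): per-process VMAs ignore the view entirely,
 * a NULL view means "the normal view", otherwise compare params. */
static bool vma_matches(const struct vma *vma, const void *vm,
                        const struct ggtt_view *view)
{
    if (vma->vm != vm)
        return false;
    if (!vma->is_ggtt)
        return true;
    if (!view)
        return vma->view.type == VIEW_NORMAL;
    if (vma->view.type != view->type)
        return false;
    return memcmp(&vma->view.params, &view->params,
                  sizeof(view->params)) == 0;
}

int main(void)
{
    int ggtt; /* its address serves as the vm identity */
    struct vma vma = { &ggtt, true, { VIEW_PARTIAL, { 4096, 8192 } } };
    struct ggtt_view view = { VIEW_PARTIAL, { 4096, 8192 } };

    printf("%d %d\n", vma_matches(&vma, &ggtt, &view),
           vma_matches(&vma, &ggtt, NULL)); /* 1 0 */
    return 0;
}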
*/ - if (offset_in_page(rot_info->uv_offset)) - uv_start_page--; - - rot_info->uv_start_page = uv_start_page; - - sg = rotate_pages(page_addr_list, rot_info->uv_start_page, - rot_info->plane[1].width, rot_info->plane[1].height, - rot_info->plane[1].width, - st, sg); + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { + sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, + rot_info->plane[i].width, rot_info->plane[i].height, + rot_info->plane[i].stride, st, sg); } - DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n", - obj->base.size, rot_info->plane[0].width, - rot_info->plane[0].height, size_pages + size_pages_uv, - size_pages); + DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); drm_free_large(page_addr_list); @@ -3531,10 +3533,9 @@ err_sg_alloc: err_st_alloc: drm_free_large(page_addr_list); - DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n", - obj->base.size, ret, rot_info->plane[0].width, - rot_info->plane[0].height, size_pages + size_pages_uv, - size_pages); + DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); + return ERR_PTR(ret); } @@ -3584,28 +3585,27 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) { int ret = 0; - if (vma->ggtt_view.pages) + if (vma->pages) return 0; if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - vma->ggtt_view.pages = vma->obj->pages; + vma->pages = vma->obj->pages; else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) - vma->ggtt_view.pages = + vma->pages = intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL) - vma->ggtt_view.pages = - intel_partial_pages(&vma->ggtt_view, vma->obj); + vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); else WARN_ONCE(1, "GGTT view %u not implemented!\n", vma->ggtt_view.type); - if (!vma->ggtt_view.pages) { + if (!vma->pages) { DRM_ERROR("Failed to get pages for GGTT view type %u!\n", vma->ggtt_view.type); ret = -EINVAL; - } else if (IS_ERR(vma->ggtt_view.pages)) { - ret = PTR_ERR(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; + } else if (IS_ERR(vma->pages)) { + ret = PTR_ERR(vma->pages); + vma->pages = NULL; DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", vma->ggtt_view.type, ret); } @@ -3668,8 +3668,11 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) { void __iomem *ptr; + /* Access through the GTT requires the device to be awake. 
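With stride and offset now carried per plane in intel_rotation_info, the rotation loop above can run once per plane instead of special-casing NV12. A sketch of the per-plane page walk; the exact traversal inside rotate_pages() (output column-major, reading the bottom source row first) is an assumption about that helper, whose body is not shown in this patch:

#include <stdio.h>

/* Emit GTT-page order for one rotated plane: walk output column by
 * column, reading the source bottom row first (a 90 degree rotation). */
static void rotate_plane(unsigned int offset, unsigned int width,
                         unsigned int height, unsigned int stride)
{
    unsigned int column, row;

    for (column = 0; column < width; column++)
        for (row = 0; row < height; row++)
            printf("%u ", offset + stride * (height - 1 - row) + column);
    printf("\n");
}

int main(void)
{
    /* Two hypothetical planes, e.g. NV12: a full-size Y plane, then a
     * UV plane at half height located after the Y pages. The sizes
     * and offsets are made up for illustration. */
    rotate_plane(0, 4, 2, 4); /* 4x2 tiles, stride 4 pages */
    rotate_plane(8, 2, 1, 2); /* 2x1 tiles, stride 2 pages */
    return 0;
}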
*/ + assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + lockdep_assert_held(&vma->vm->dev->struct_mutex); - if (WARN_ON(!vma->obj->map_and_fenceable)) + if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) return IO_ERR_PTR(-ENODEV); GEM_BUG_ON(!i915_vma_is_ggtt(vma)); @@ -3677,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ptr = vma->iomap; if (ptr == NULL) { - ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, vma->node.start, vma->node.size); if (ptr == NULL) @@ -3689,3 +3692,15 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) __i915_vma_pin(vma); return ptr; } + +void i915_vma_unpin_and_release(struct i915_vma **p_vma) +{ + struct i915_vma *vma; + + vma = fetch_and_zero(p_vma); + if (!vma) + return; + + i915_vma_unpin(vma); + i915_vma_put(vma); +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index cc56206..a9aec25 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -38,7 +38,13 @@ #include "i915_gem_request.h" +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 32 +/* 32 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 6 + struct drm_i915_file_private; +struct drm_i915_fence_reg; typedef uint32_t gen6_pte_t; typedef uint64_t gen8_pte_t; @@ -139,12 +145,9 @@ enum i915_ggtt_view_type { }; struct intel_rotation_info { - unsigned int uv_offset; - uint32_t pixel_format; - unsigned int uv_start_page; struct { /* tiles */ - unsigned int width, height; + unsigned int width, height, stride, offset; } plane[2]; }; @@ -158,8 +161,6 @@ struct i915_ggtt_view { } partial; struct intel_rotation_info rotated; } params; - - struct sg_table *pages; }; extern const struct i915_ggtt_view i915_ggtt_view_normal; @@ -179,8 +180,11 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + struct drm_i915_fence_reg *fence; + struct sg_table *pages; void __iomem *iomap; u64 size; + u64 display_alignment; unsigned int flags; /** @@ -201,11 +205,13 @@ struct i915_vma { #define I915_VMA_LOCAL_BIND BIT(7) #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) -#define I915_VMA_GGTT BIT(8) -#define I915_VMA_CLOSED BIT(9) +#define I915_VMA_GGTT BIT(8) +#define I915_VMA_CAN_FENCE BIT(9) +#define I915_VMA_CLOSED BIT(10) unsigned int active; struct i915_gem_active last_read[I915_NUM_ENGINES]; + struct i915_gem_active last_fence; /** * Support different GGTT views into the same object. 
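i915_vma_unpin_and_release() leans on fetch_and_zero() so the caller's pointer slot is cleared before any release work, which makes a second call harmless. The idiom in isolation, shaped like the kernel macro (it relies on the GCC statement-expression extension, as the kernel does):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Same shape as the kernel's fetch_and_zero(): steal the pointer and
 * clear the caller's slot in one step. */
#define fetch_and_zero(ptr) ({                  \
    __typeof__(*(ptr)) __T = *(ptr);            \
    *(ptr) = (__typeof__(*(ptr)))0;             \
    __T;                                        \
})

static void release(int **slot)
{
    int *p = fetch_and_zero(slot);

    if (!p)
        return; /* a second call is a no-op */
    free(p);
}

int main(void)
{
    int *p = malloc(sizeof(*p));

    release(&p);
    assert(p == NULL);
    release(&p); /* harmless */
    printf("ok\n");
    return 0;
}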
@@ -232,11 +238,22 @@ struct i915_vma { struct drm_i915_gem_exec_object2 *exec_entry; }; +struct i915_vma * +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view); +void i915_vma_unpin_and_release(struct i915_vma **p_vma); + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { return vma->flags & I915_VMA_GGTT; } +static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_CAN_FENCE; +} + static inline bool i915_vma_is_closed(const struct i915_vma *vma) { return vma->flags & I915_VMA_CLOSED; @@ -270,6 +287,15 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, return vma->active & BIT(engine); } +static inline u32 i915_ggtt_offset(const struct i915_vma *vma) +{ + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!vma->node.allocated); + GEM_BUG_ON(upper_32_bits(vma->node.start)); + GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); + return lower_32_bits(vma->node.start); +} + struct i915_page_dma { struct page *page; union { @@ -413,13 +439,13 @@ struct i915_address_space { */ struct i915_ggtt { struct i915_address_space base; + struct io_mapping mappable; /* Mapping to our CPU mappable region */ size_t stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ size_t stolen_reserved_base; size_t stolen_reserved_size; u64 mappable_end; /* End offset that we can CPU map */ - struct io_mapping *mappable; /* Mapping to our CPU mappable region */ phys_addr_t mappable_base; /* PA of our GMADR */ /** "Graphics Stolen Memory" holds the global PTEs */ @@ -608,24 +634,11 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); -static inline bool -i915_ggtt_view_equal(const struct i915_ggtt_view *a, - const struct i915_ggtt_view *b) -{ - if (WARN_ON(!a || !b)) - return false; - - if (a->type != b->type) - return false; - if (a->type != I915_GGTT_VIEW_NORMAL) - return !memcmp(&a->params, &b->params, sizeof(a->params)); - return true; -} - /* Flags used by pin/bind&friends. 
*/ #define PIN_NONBLOCK BIT(0) #define PIN_MAPPABLE BIT(1) #define PIN_ZONE_4G BIT(2) +#define PIN_NONFAULT BIT(3) #define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ #define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ @@ -715,4 +728,10 @@ static inline void i915_vma_unpin_iomap(struct i915_vma *vma) i915_vma_unpin(vma); } +static inline struct page *i915_vma_first_page(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + return sg_page(vma->pages->sgl); +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 57fd767..95b7e9a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -30,8 +30,7 @@ struct render_state { const struct intel_renderstate_rodata *rodata; - struct drm_i915_gem_object *obj; - u64 ggtt_offset; + struct i915_vma *vma; u32 aux_batch_size; u32 aux_batch_offset; }; @@ -73,7 +72,7 @@ render_state_get_rodata(const struct drm_i915_gem_request *req) static int render_state_setup(struct render_state *so) { - struct drm_device *dev = so->obj->base.dev; + struct drm_device *dev = so->vma->vm->dev; const struct intel_renderstate_rodata *rodata = so->rodata; const bool has_64bit_reloc = INTEL_GEN(dev) >= 8; unsigned int i = 0, reloc_index = 0; @@ -81,18 +80,18 @@ static int render_state_setup(struct render_state *so) u32 *d; int ret; - ret = i915_gem_object_set_to_cpu_domain(so->obj, true); + ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true); if (ret) return ret; - page = i915_gem_object_get_dirty_page(so->obj, 0); + page = i915_gem_object_get_dirty_page(so->vma->obj, 0); d = kmap(page); while (i < rodata->batch_items) { u32 s = rodata->batch[i]; if (i * 4 == rodata->reloc[reloc_index]) { - u64 r = s + so->ggtt_offset; + u64 r = s + so->vma->node.start; s = lower_32_bits(r); if (has_64bit_reloc) { if (i + 1 >= rodata->batch_items || @@ -154,7 +153,7 @@ static int render_state_setup(struct render_state *so) kunmap(page); - ret = i915_gem_object_set_to_gtt_domain(so->obj, false); + ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false); if (ret) return ret; @@ -175,6 +174,7 @@ err_out: int i915_gem_render_state_init(struct drm_i915_gem_request *req) { struct render_state so; + struct drm_i915_gem_object *obj; int ret; if (WARN_ON(req->engine->id != RCS)) @@ -187,21 +187,25 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req) if (so.rodata->batch_items * 4 > 4096) return -EINVAL; - so.obj = i915_gem_object_create(&req->i915->drm, 4096); - if (IS_ERR(so.obj)) - return PTR_ERR(so.obj); + obj = i915_gem_object_create(&req->i915->drm, 4096); + if (IS_ERR(obj)) + return PTR_ERR(obj); - ret = i915_gem_object_ggtt_pin(so.obj, NULL, 0, 0, 0); - if (ret) + so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL); + if (IS_ERR(so.vma)) { + ret = PTR_ERR(so.vma); goto err_obj; + } - so.ggtt_offset = i915_gem_obj_ggtt_offset(so.obj); + ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL); + if (ret) + goto err_obj; ret = render_state_setup(&so); if (ret) goto err_unpin; - ret = req->engine->emit_bb_start(req, so.ggtt_offset, + ret = req->engine->emit_bb_start(req, so.vma->node.start, so.rodata->batch_items * 4, I915_DISPATCH_SECURE); if (ret) @@ -209,7 +213,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req) if (so.aux_batch_size > 8) { ret = req->engine->emit_bb_start(req, - (so.ggtt_offset + + (so.vma->node.start + so.aux_batch_offset), so.aux_batch_size, I915_DISPATCH_SECURE); @@ -217,10 +221,10 @@ int 
i915_gem_render_state_init(struct drm_i915_gem_request *req) goto err_unpin; } - i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0); + i915_vma_move_to_active(so.vma, req, 0); err_unpin: - i915_gem_object_ggtt_unpin(so.obj); + i915_vma_unpin(so.vma); err_obj: - i915_gem_object_put(so.obj); + i915_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h index c44fca8..18cce3f 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ b/drivers/gpu/drm/i915/i915_gem_render_state.h @@ -24,7 +24,7 @@ #ifndef _I915_GEM_RENDER_STATE_H_ #define _I915_GEM_RENDER_STATE_H_ -#include <linux/types.h> +struct drm_i915_gem_request; int i915_gem_render_state_init(struct drm_i915_gem_request *req); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 6a16616..1a21532 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -137,8 +137,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, list_add_tail(&req->client_list, &file_priv->mm.request_list); spin_unlock(&file_priv->mm.lock); - req->pid = get_pid(task_pid(current)); - return 0; } @@ -154,9 +152,6 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) list_del(&request->client_list); request->file_priv = NULL; spin_unlock(&file_priv->mm.lock); - - put_pid(request->pid); - request->pid = NULL; } void i915_gem_retire_noop(struct i915_gem_active *active, @@ -355,7 +350,35 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, if (req && i915_gem_request_completed(req)) i915_gem_request_retire(req); - req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); + /* Beware: Dragons be flying overhead. + * + * We use RCU to look up requests in flight. The lookups may + * race with the request being allocated from the slab freelist. + * That is, the request we are writing to here may be in the process + * of being read by __i915_gem_active_get_rcu(). As such, + * we have to be very careful when overwriting the contents. During + * the RCU lookup, we chase the request->engine pointer, + * read the request->fence.seqno and increment the reference count. + * + * The reference count is incremented atomically. If it is zero, + * the lookup knows the request is unallocated and complete. Otherwise, + * it is either still in use, or has been reallocated and reset + * with fence_init(). This increment is safe for release as we check + * that the request we have a reference to matches the active + * request. + * + * Before we increment the refcount, we chase the request->engine + * pointer. We must not call kmem_cache_zalloc() or else we set + * that pointer to NULL and cause a crash during the lookup. If + * we see the request is completed (based on the value of the + * old engine and seqno), the lookup is complete and reports NULL. + * If we decide the request is not completed (new engine or seqno), + * then we grab a reference and double check that it is still the + * active request - which it won't be - and restart the lookup. + * + * Do not use kmem_cache_zalloc() here!
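The comment above is the crux of the switch from kmem_cache_zalloc() to kmem_cache_alloc(). A userspace analogue of the lookup side, assuming, as SLAB_DESTROY_BY_RCU guarantees, that a freed request's memory stays readable: take a speculative reference only if the count is non-zero, then re-check that the tracker still points at the same request:

#include <stdatomic.h>
#include <stdio.h>

struct request {
    atomic_uint refcount; /* 0 == idle or freed */
};

static _Atomic(struct request *) active;

/* Sketch of __i915_gem_active_get_rcu(): we may chase a stale pointer,
 * so the reference is speculative and must be re-validated. */
static struct request *active_get(void)
{
    for (;;) {
        struct request *req = atomic_load(&active);
        unsigned int c;

        if (!req)
            return NULL;

        /* Speculative kref_get_unless_zero(). */
        c = atomic_load(&req->refcount);
        do {
            if (c == 0)
                break; /* recycled under us: re-read the tracker */
        } while (!atomic_compare_exchange_weak(&req->refcount, &c, c + 1));
        if (c == 0)
            continue;

        if (req == atomic_load(&active))
            return req; /* still the tracked request */

        /* Reallocated under us: drop the reference and retry
         * (the kernel would call i915_gem_request_put() here). */
        atomic_fetch_sub(&req->refcount, 1);
    }
}

int main(void)
{
    struct request r = { .refcount = 1 };

    atomic_store(&active, &r);
    printf("%p\n", (void *)active_get());
    return 0;
}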
+ */ + req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); if (!req) return ERR_PTR(-ENOMEM); @@ -375,6 +398,12 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, req->engine = engine; req->ctx = i915_gem_context_get(ctx); + /* No zalloc, must clear what we need by hand */ + req->previous_context = NULL; + req->file_priv = NULL; + req->batch = NULL; + req->elsp_submitted = 0; + /* * Reserve space in the ring buffer for all the commands required to * eventually emit this request. This is to guarantee that the @@ -391,6 +420,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, if (ret) goto err_ctx; + /* Record the position of the start of the request so that + * should we detect the updated seqno part-way through the + * GPU processing the request, we never over-estimate the + * position of the head. + */ + req->head = req->ring->tail; + return req; err_ctx: @@ -426,22 +462,14 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine) * request is not being tracked for completion but the work itself is * going to happen on the hardware. This would be a Bad Thing(tm). */ -void __i915_add_request(struct drm_i915_gem_request *request, - struct drm_i915_gem_object *obj, - bool flush_caches) +void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) { - struct intel_engine_cs *engine; - struct intel_ring *ring; + struct intel_engine_cs *engine = request->engine; + struct intel_ring *ring = request->ring; u32 request_start; u32 reserved_tail; int ret; - if (WARN_ON(!request)) - return; - - engine = request->engine; - ring = request->ring; - /* * To ensure that this call will not fail, space for its emissions * should already have been reserved in the ring buffer. Let the ring @@ -467,16 +495,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, trace_i915_gem_request_add(request); - request->head = request_start; - - /* Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here. - */ - request->batch_obj = obj; - /* Seal the request and mark it as pending execution. Note that * we may inspect this state, without holding any locks, during * hangcheck. Hence we apply the barrier to ensure that we do not @@ -489,10 +507,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, list_add_tail(&request->link, &engine->request_list); list_add_tail(&request->ring_link, &ring->request_list); - /* Record the position of the start of the request so that + /* Record the position of the start of the breadcrumb so that * should we detect the updated seqno part-way through the * GPU processing the request, we never over-estimate the - * position of the head. + * position of the ring's HEAD. */ request->postfix = ring->tail; diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 3496e28..6c72bd8 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -51,6 +51,13 @@ struct intel_signal_node { * emission time to be associated with the request for tracking how far ahead * of the GPU the submission is. * + * When modifying this structure be very aware that we perform a lockless + * RCU lookup of it that may race against reallocation of the struct + * from the slab freelist. 
We intentionally do not zero the structure on + * allocation so that the lookup can use the dangling pointers (and is + * cognisant that those pointers may be wrong). Instead, everything that + * needs to be initialised must be done so explicitly. + * * The requests are reference counted. */ struct drm_i915_gem_request { @@ -111,7 +118,7 @@ struct drm_i915_gem_request { /** Batch buffer related to this request if any (used for * error state dump only). */ - struct drm_i915_gem_object *batch_obj; + struct i915_vma *batch; struct list_head active_list; /** Time at which this request was emitted, in jiffies. */ @@ -127,9 +134,6 @@ struct drm_i915_gem_request { /** file_priv list entry for this request */ struct list_head client_list; - /** process identifier submitting this request */ - struct pid *pid; - /** * The ELSP only accepts two elements at a time, so we queue * context/tail pairs on a given queue (ring->execlist_queue) until the @@ -218,13 +222,11 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, *pdst = src; } -void __i915_add_request(struct drm_i915_gem_request *req, - struct drm_i915_gem_object *batch_obj, - bool flush_caches); +void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); #define i915_add_request(req) \ - __i915_add_request(req, NULL, true) + __i915_add_request(req, true) #define i915_add_request_no_flush(req) \ - __i915_add_request(req, NULL, false) + __i915_add_request(req, false) struct intel_rps_client; #define NO_WAITBOOST ERR_PTR(-1) @@ -360,41 +362,34 @@ __i915_gem_active_peek(const struct i915_gem_active *active) } /** - * i915_gem_active_peek - report the active request being monitored + * i915_gem_active_raw - return the active request * @active - the active tracker * - * i915_gem_active_peek() returns the current request being tracked if - * still active, or NULL. It does not obtain a reference on the request - * for the caller, so the caller must hold struct_mutex. + * i915_gem_active_raw() returns the current request being tracked, or NULL. + * It does not obtain a reference on the request for the caller, so the caller + * must hold struct_mutex. */ static inline struct drm_i915_gem_request * -i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) +i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex) { - struct drm_i915_gem_request *request; - - request = rcu_dereference_protected(active->request, - lockdep_is_held(mutex)); - if (!request || i915_gem_request_completed(request)) - return NULL; - - return request; + return rcu_dereference_protected(active->request, + lockdep_is_held(mutex)); } /** - * i915_gem_active_peek_rcu - report the active request being monitored + * i915_gem_active_peek - report the active request being monitored * @active - the active tracker * - * i915_gem_active_peek_rcu() returns the current request being tracked if + * i915_gem_active_peek() returns the current request being tracked if * still active, or NULL. It does not obtain a reference on the request - * for the caller, and inspection of the request is only valid under - * the RCU lock. + * for the caller, so the caller must hold struct_mutex.
*/ static inline struct drm_i915_gem_request * -i915_gem_active_peek_rcu(const struct i915_gem_active *active) +i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) { struct drm_i915_gem_request *request; - request = rcu_dereference(active->request); + request = i915_gem_active_raw(active, mutex); if (!request || i915_gem_request_completed(request)) return NULL; @@ -465,6 +460,10 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) * just report the active tracker is idle. If the new request is * incomplete, then we acquire a reference on it and check that * it remained the active request. + * + * It is then imperative that we do not zero the request on + * reallocation, so that we can chase the dangling pointers! + * See i915_gem_request_alloc(). */ do { struct drm_i915_gem_request *request; @@ -497,6 +496,9 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active) * incremented) then the following read for rcu_access_pointer() * must occur after the atomic operation and so confirm * that this request is the one currently being tracked. + * + * The corresponding write barrier is part of + * rcu_assign_pointer(). */ if (!request || request == rcu_access_pointer(active->request)) return rcu_pointer_handoff(request); @@ -635,8 +637,7 @@ i915_gem_active_retire(struct i915_gem_active *active, struct drm_i915_gem_request *request; int ret; - request = rcu_dereference_protected(active->request, - lockdep_is_held(mutex)); + request = i915_gem_active_raw(active, mutex); if (!request) return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 1327961..aa050fa 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -115,17 +115,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) base = bsm & INTEL_BSM_MASK; } else if (IS_I865G(dev)) { + u32 tseg_size = 0; u16 toud = 0; + u8 tmp; + + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), + I845_ESMRAMC, &tmp); + + if (tmp & TSEG_ENABLE) { + switch (tmp & I845_TSEG_SIZE_MASK) { + case I845_TSEG_SIZE_512K: + tseg_size = KB(512); + break; + case I845_TSEG_SIZE_1M: + tseg_size = MB(1); + break; + } + } - /* - * FIXME is the graphics stolen memory region - * always at TOUD? Ie. is it always the last - * one to be allocated by the BIOS? - */ pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0), I865_TOUD, &toud); - base = toud << 16; + base = (toud << 16) + tseg_size; } else if (IS_I85X(dev)) { u32 tseg_size = 0; u32 tom; @@ -685,7 +696,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; - vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base); + vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -705,6 +716,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, goto err; } + vma->pages = obj->pages; vma->flags |= I915_VMA_GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index f4b984d..a14b1e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -116,37 +116,58 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } -/* Is the current GTT allocation valid for the change in tiling? 
*/ -static bool -i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) +static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); u32 size; - if (tiling_mode == I915_TILING_NONE) - return true; - - if (INTEL_GEN(dev_priv) >= 4) + if (!i915_vma_is_map_and_fenceable(vma)) return true; - if (IS_GEN3(dev_priv)) { - if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) + if (INTEL_GEN(dev_priv) == 3) { + if (vma->node.start & ~I915_FENCE_START_MASK) return false; } else { - if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) + if (vma->node.start & ~I830_FENCE_START_MASK) return false; } - size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode); - if (i915_gem_obj_ggtt_size(obj) != size) + size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode); + if (vma->node.size < size) return false; - if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) + if (vma->node.start & (size - 1)) return false; return true; } +/* Make the current GTT allocation valid for the change in tiling. */ +static int +i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_vma *vma; + int ret; + + if (tiling_mode == I915_TILING_NONE) + return 0; + + if (INTEL_GEN(dev_priv) >= 4) + return 0; + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (i915_vma_fence_prepare(vma, tiling_mode)) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + } + + return 0; +} + /** * i915_gem_set_tiling - IOCTL handler to set tiling mode * @dev: DRM device @@ -168,7 +189,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_i915_gem_set_tiling *args = data; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - int ret = 0; + int err = 0; /* Make sure we don't cross-contaminate obj->tiling_and_stride */ BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK); @@ -187,7 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); if (obj->pin_display || obj->framebuffer_references) { - ret = -EBUSY; + err = -EBUSY; goto err; } @@ -234,11 +255,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * has to also include the unfenced register the GPU uses * whilst executing a fenced command for an untiled object. 
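[Aside: on gen2/3 the fence region for a tiled object, as returned by i915_gem_get_ggtt_size(), is broadly a power-of-two at least as large as the object, and i915_vma_fence_prepare() reduces to two alignment tests. A standalone restatement with a worked example; fence_region_ok is illustrative, not a driver helper:]

	static bool fence_region_ok(u64 node_start, u64 node_size,
				    u64 region_size)
	{
		/* the bound range must cover a whole fence region ... */
		if (node_size < region_size)
			return false;
		/* ... and start on a region-sized boundary */
		if (node_start & (region_size - 1))
			return false;
		return true;
	}

	/* e.g. a 700KiB tiled object on gen3 needs a 1MiB region:
	 *   fence_region_ok(0x100000, SZ_1M, SZ_1M) -> true
	 *   fence_region_ok(0x180000, SZ_1M, SZ_1M) -> false
	 *                            (start not aligned to 1MiB)
	 */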
*/ - if (obj->map_and_fenceable && - !i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); - if (ret == 0) { + err = i915_gem_object_fence_prepare(obj, args->tiling_mode); + if (!err) { + struct i915_vma *vma; + if (obj->pages && obj->madv == I915_MADV_WILLNEED && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { @@ -248,11 +269,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_object_pin_pages(obj); } - obj->fence_dirty = - !i915_gem_active_is_idle(&obj->last_fence, - &dev->struct_mutex) || - obj->fence_reg != I915_FENCE_REG_NONE; + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!vma->fence) + continue; + vma->fence->dirty = true; + } obj->tiling_and_stride = args->stride | args->tiling_mode; @@ -281,7 +303,7 @@ err: intel_runtime_pm_put(dev_priv); - return ret; + return err; } /** diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 57218cc..be54825 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -542,8 +542,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) } } obj->userptr.work = ERR_PTR(ret); - if (ret) - __i915_gem_userptr_set_active(obj, false); } obj->userptr.workers--; @@ -628,15 +626,14 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) * to the vma (discard or cloning) which should prevent the more * egregious cases from causing harm. */ - if (IS_ERR(obj->userptr.work)) { - /* active flag will have been dropped already by the worker */ - ret = PTR_ERR(obj->userptr.work); - obj->userptr.work = NULL; - return ret; - } - if (obj->userptr.work) + + if (obj->userptr.work) { /* active flag should still be held for the pending work */ - return -EAGAIN; + if (IS_ERR(obj->userptr.work)) + return PTR_ERR(obj->userptr.work); + else + return -EAGAIN; + } /* Let the mmu-notifier know that we have begun and need cancellation */ ret = __i915_gem_userptr_set_active(obj, true); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index eecb870..41ec7a1 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -42,16 +42,6 @@ static const char *engine_str(int engine) } } -static const char *pin_flag(int pinned) -{ - if (pinned > 0) - return " P"; - else if (pinned < 0) - return " p"; - else - return ""; -} - static const char *tiling_flag(int tiling) { switch (tiling) { @@ -189,7 +179,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, { int i; - err_printf(m, " %s [%d]:\n", name, count); + err_printf(m, "%s [%d]:\n", name, count); while (count--) { err_printf(m, " %08x_%08x %8u %02x %02x [ ", @@ -202,7 +192,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, err_printf(m, "%02x ", err->rseqno[i]); err_printf(m, "] %02x", err->wseqno); - err_puts(m, pin_flag(err->pinned)); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); @@ -247,14 +236,23 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, err_printf(m, " HEAD: 0x%08x\n", ee->head); err_printf(m, " TAIL: 0x%08x\n", ee->tail); err_printf(m, " CTL: 0x%08x\n", ee->ctl); + err_printf(m, " MODE: 0x%08x\n", ee->mode); err_printf(m, " HWS: 0x%08x\n", ee->hws); err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ee->acthd>>32), (u32)ee->acthd); err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); err_printf(m, 
" INSTDONE: 0x%08x\n", ee->instdone); + if (ee->batchbuffer) { + u64 start = ee->batchbuffer->gtt_offset; + u64 end = start + ee->batchbuffer->gtt_size; + + err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", + upper_32_bits(start), lower_32_bits(start), + upper_32_bits(end), lower_32_bits(end)); + } if (INTEL_GEN(m->i915) >= 4) { - err_printf(m, " BBADDR: 0x%08x %08x\n", + err_printf(m, " BBADDR: 0x%08x_%08x\n", (u32)(ee->bbaddr>>32), (u32)ee->bbaddr); err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate); err_printf(m, " INSTPS: 0x%08x\n", ee->instps); @@ -323,6 +321,16 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, } } +static void err_print_capabilities(struct drm_i915_error_state_buf *m, + const struct intel_device_info *info) +{ +#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x)) +#define SEP_SEMICOLON ; + DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); +#undef PRINT_FLAG +#undef SEP_SEMICOLON +} + int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { @@ -342,6 +350,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); err_printf(m, "Kernel: " UTS_RELEASE "\n"); + err_print_capabilities(m, &error->device_info); max_hangcheck_score = 0; for (i = 0; i < ARRAY_SIZE(error->engine); i++) { if (error->engine[i].hangcheck_score > max_hangcheck_score) @@ -414,18 +423,33 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, error_print_engine(m, &error->engine[i]); } - for (i = 0; i < error->vm_count; i++) { - err_printf(m, "vm[%d]\n", i); + for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) { + char buf[128]; + int len, first = 1; + + if (!error->active_vm[i]) + break; + + len = scnprintf(buf, sizeof(buf), "Active ("); + for (j = 0; j < ARRAY_SIZE(error->engine); j++) { + if (error->engine[j].vm != error->active_vm[i]) + continue; - print_error_buffers(m, "Active", + len += scnprintf(buf + len, sizeof(buf), "%s%s", + first ? 
"" : ", ", + dev_priv->engine[j].name); + first = 0; + } + scnprintf(buf + len, sizeof(buf), ")"); + print_error_buffers(m, buf, error->active_bo[i], error->active_bo_count[i]); - - print_error_buffers(m, "Pinned", - error->pinned_bo[i], - error->pinned_bo_count[i]); } + print_error_buffers(m, "Pinned (global)", + error->pinned_bo, + error->pinned_bo_count); + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { struct drm_i915_error_engine *ee = &error->engine[i]; @@ -455,9 +479,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, dev_priv->engine[i].name, ee->num_requests); for (j = 0; j < ee->num_requests; j++) { - err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", + err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n", + ee->requests[j].pid, ee->requests[j].seqno, ee->requests[j].jiffies, + ee->requests[j].head, ee->requests[j].tail); } } @@ -533,7 +559,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - if ((obj = error->semaphore_obj)) { + if ((obj = error->semaphore)) { err_printf(m, "Semaphore page = 0x%08x\n", lower_32_bits(obj->gtt_offset)); for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { @@ -624,15 +650,12 @@ static void i915_error_state_free(struct kref *error_ref) kfree(ee->waiters); } - i915_error_object_free(error->semaphore_obj); + i915_error_object_free(error->semaphore); - for (i = 0; i < error->vm_count; i++) + for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) kfree(error->active_bo[i]); - - kfree(error->active_bo); - kfree(error->active_bo_count); kfree(error->pinned_bo); - kfree(error->pinned_bo_count); + kfree(error->overlay); kfree(error->display); kfree(error); @@ -640,46 +663,45 @@ static void i915_error_state_free(struct kref *error_ref) static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *src, - struct i915_address_space *vm) + struct i915_vma *vma) { struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_gem_object *src; struct drm_i915_error_object *dst; - struct i915_vma *vma = NULL; int num_pages; bool use_ggtt; int i = 0; u64 reloc_offset; - if (src == NULL || src->pages == NULL) + if (!vma) + return NULL; + + src = vma->obj; + if (!src->pages) return NULL; num_pages = src->base.size >> PAGE_SHIFT; dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); - if (dst == NULL) + if (!dst) return NULL; - if (i915_gem_obj_bound(src, vm)) - dst->gtt_offset = i915_gem_obj_offset(src, vm); - else - dst->gtt_offset = -1; + dst->gtt_offset = vma->node.start; + dst->gtt_size = vma->node.size; reloc_offset = dst->gtt_offset; - if (i915_is_ggtt(vm)) - vma = i915_gem_obj_to_ggtt(src); use_ggtt = (src->cache_level == I915_CACHE_NONE && - vma && (vma->flags & I915_VMA_GLOBAL_BIND) && + (vma->flags & I915_VMA_GLOBAL_BIND) && reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end); /* Cannot access stolen address directly, try to use the aperture */ if (src->stolen) { use_ggtt = true; - if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND)) + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) goto unwind; - reloc_offset = i915_gem_obj_ggtt_offset(src); + reloc_offset = vma->node.start; if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end) goto unwind; } @@ -707,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, * captures what the GPU read. 
*/ - s = io_mapping_map_atomic_wc(ggtt->mappable, + s = io_mapping_map_atomic_wc(&ggtt->mappable, reloc_offset); memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); @@ -739,8 +761,6 @@ unwind: kfree(dst); return NULL; } -#define i915_error_ggtt_object_create(dev_priv, src) \ - i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base) /* The error capture is special as tries to run underneath the normal * locking rules - so we use the raw version of the i915_gem_active lookup. @@ -777,10 +797,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->gtt_offset = vma->node.start; err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (i915_gem_obj_is_pinned(obj)) - err->pinned = 1; + err->fence_reg = vma->fence ? vma->fence->id : -1; err->tiling = i915_gem_object_get_tiling(obj); err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; @@ -788,13 +805,17 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->cache_level = obj->cache_level; } -static u32 capture_active_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) +static u32 capture_error_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head, + bool pinned_only) { struct i915_vma *vma; int i = 0; list_for_each_entry(vma, head, vm_link) { + if (pinned_only && !i915_vma_is_pinned(vma)) + continue; + capture_bo(err++, vma); if (++i == count) break; @@ -803,28 +824,6 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, return i; } -static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head, - struct i915_address_space *vm) -{ - struct drm_i915_gem_object *obj; - struct drm_i915_error_buffer * const first = err; - struct drm_i915_error_buffer * const last = err + count; - - list_for_each_entry(obj, head, global_list) { - struct i915_vma *vma; - - if (err == last) - break; - - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == vm && i915_vma_is_pinned(vma)) - capture_bo(err++, vma); - } - - return err - first; -} - /* Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by * grossly estimating a GPU error state. 
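[Aside: the new capture_error_bo() is consumed with a count-then-allocate-then-fill pattern, as the capture helpers later in this patch do. A condensed sketch under those assumptions; capture_active_sketch is illustrative:]

	static u32 capture_active_sketch(struct i915_address_space *vm,
					 struct drm_i915_error_buffer **out)
	{
		struct drm_i915_error_buffer *bo = NULL;
		struct i915_vma *vma;
		u32 count = 0;

		/* Error capture runs in atomic context: count first, then
		 * make a best-effort GFP_ATOMIC allocation and tolerate
		 * failure. */
		list_for_each_entry(vma, &vm->active_list, vm_link)
			count++;

		if (count)
			bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
		if (!bo)
			return 0;

		*out = bo;
		/* the list may have shrunk; the helper returns the number
		 * of entries actually captured */
		return capture_error_bo(bo, count, &vm->active_list, false);
	}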
@@ -884,7 +883,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error, struct intel_engine_cs *to; enum intel_engine_id id; - if (!error->semaphore_obj) + if (!error->semaphore) return; for_each_engine_id(to, dev_priv, id) { @@ -897,7 +896,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error, signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4; - tmp = error->semaphore_obj->pages[0]; + tmp = error->semaphore->pages[0]; idx = intel_engine_sync_index(engine, to); ee->semaphore_mboxes[idx] = tmp[signal_offset]; @@ -1007,6 +1006,8 @@ static void error_record_engine_registers(struct drm_i915_error_state *error, ee->head = I915_READ_HEAD(engine); ee->tail = I915_READ_TAIL(engine); ee->ctl = I915_READ_CTL(engine); + if (INTEL_GEN(dev_priv) > 2) + ee->mode = I915_READ_MODE(engine); if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; @@ -1062,45 +1063,76 @@ static void error_record_engine_registers(struct drm_i915_error_state *error, } } - -static void i915_gem_record_active_context(struct intel_engine_cs *engine, - struct drm_i915_error_state *error, - struct drm_i915_error_engine *ee) +static void engine_record_requests(struct intel_engine_cs *engine, + struct drm_i915_gem_request *first, + struct drm_i915_error_engine *ee) { - struct drm_i915_private *dev_priv = engine->i915; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_request *request; + int count; - /* Currently render ring is the only HW context user */ - if (engine->id != RCS || !error->ccid) + count = 0; + request = first; + list_for_each_entry_from(request, &engine->request_list, link) + count++; + if (!count) return; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!i915_gem_obj_ggtt_bound(obj)) - continue; + ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC); + if (!ee->requests) + return; + + ee->num_requests = count; - if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { - ee->ctx = i915_error_ggtt_object_create(dev_priv, obj); + count = 0; + request = first; + list_for_each_entry_from(request, &engine->request_list, link) { + struct drm_i915_error_request *erq; + + if (count >= ee->num_requests) { + /* + * If the ring request list was changed in + * between the point where the error request + * list was created and dimensioned and this + * point then just exit early to avoid crashes. + * + * We don't need to communicate that the + * request list changed state during error + * state capture and that the error state is + * slightly incorrect as a consequence since we + * are typically only interested in the request + * list state at the point of error state + * capture, not in any changes happening during + * the capture. + */ break; } + + erq = &ee->requests[count++]; + erq->seqno = request->fence.seqno; + erq->jiffies = request->emitted_jiffies; + erq->head = request->head; + erq->tail = request->tail; + + rcu_read_lock(); + erq->pid = request->ctx->pid ? 
pid_nr(request->ctx->pid) : 0; + rcu_read_unlock(); } + ee->num_requests = count; } static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_i915_gem_request *request; - int i, count; + int i; - if (dev_priv->semaphore_obj) { - error->semaphore_obj = - i915_error_ggtt_object_create(dev_priv, - dev_priv->semaphore_obj); - } + error->semaphore = + i915_error_object_create(dev_priv, dev_priv->semaphore); for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = &dev_priv->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; + struct drm_i915_gem_request *request; ee->pid = -1; ee->engine_id = -1; @@ -1115,10 +1147,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, request = i915_gem_find_active_request(engine); if (request) { - struct i915_address_space *vm; struct intel_ring *ring; + struct pid *pid; - vm = request->ctx->ppgtt ? + ee->vm = request->ctx->ppgtt ? &request->ctx->ppgtt->base : &ggtt->base; /* We need to copy these to an anonymous buffer @@ -1127,19 +1159,23 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, */ ee->batchbuffer = i915_error_object_create(dev_priv, - request->batch_obj, - vm); + request->batch); if (HAS_BROKEN_CS_TLB(dev_priv)) ee->wa_batchbuffer = - i915_error_ggtt_object_create(dev_priv, - engine->scratch.obj); + i915_error_object_create(dev_priv, + engine->scratch); + + ee->ctx = + i915_error_object_create(dev_priv, + request->ctx->engine[i].state); - if (request->pid) { + pid = request->ctx->pid; + if (pid) { struct task_struct *task; rcu_read_lock(); - task = pid_task(request->pid, PIDTYPE_PID); + task = pid_task(pid, PIDTYPE_PID); if (task) { strcpy(ee->comm, task->comm); ee->pid = task->pid; @@ -1154,145 +1190,102 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; ee->ringbuffer = - i915_error_ggtt_object_create(dev_priv, - ring->obj); - } - - ee->hws_page = - i915_error_ggtt_object_create(dev_priv, - engine->status_page.obj); + i915_error_object_create(dev_priv, ring->vma); - ee->wa_ctx = i915_error_ggtt_object_create(dev_priv, - engine->wa_ctx.obj); - - i915_gem_record_active_context(engine, error, ee); - - count = 0; - list_for_each_entry(request, &engine->request_list, link) - count++; - - ee->num_requests = count; - ee->requests = - kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC); - if (!ee->requests) { - ee->num_requests = 0; - continue; + engine_record_requests(engine, request, ee); } - count = 0; - list_for_each_entry(request, &engine->request_list, link) { - struct drm_i915_error_request *erq; - - if (count >= ee->num_requests) { - /* - * If the ring request list was changed in - * between the point where the error request - * list was created and dimensioned and this - * point then just exit early to avoid crashes. - * - * We don't need to communicate that the - * request list changed state during error - * state capture and that the error state is - * slightly incorrect as a consequence since we - * are typically only interested in the request - * list state at the point of error state - * capture, not in any changes happening during - * the capture. 
- */ - break; - } + ee->hws_page = + i915_error_object_create(dev_priv, + engine->status_page.vma); - erq = &ee->requests[count++]; - erq->seqno = request->fence.seqno; - erq->jiffies = request->emitted_jiffies; - erq->tail = request->postfix; - } + ee->wa_ctx = + i915_error_object_create(dev_priv, engine->wa_ctx.vma); } } -/* FIXME: Since pin count/bound list is global, we duplicate what we capture per - * VM. - */ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct i915_address_space *vm, - const int ndx) + int idx) { - struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL; - struct drm_i915_gem_object *obj; + struct drm_i915_error_buffer *active_bo; struct i915_vma *vma; - int i; + int count; - i = 0; + count = 0; list_for_each_entry(vma, &vm->active_list, vm_link) - i++; - error->active_bo_count[ndx] = i; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == vm && i915_vma_is_pinned(vma)) - i++; - } - error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; - - if (i) { - active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC); - if (active_bo) - pinned_bo = active_bo + error->active_bo_count[ndx]; - } + count++; + active_bo = NULL; + if (count) + active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC); if (active_bo) - error->active_bo_count[ndx] = - capture_active_bo(active_bo, - error->active_bo_count[ndx], - &vm->active_list); - - if (pinned_bo) - error->pinned_bo_count[ndx] = - capture_pinned_bo(pinned_bo, - error->pinned_bo_count[ndx], - &dev_priv->mm.bound_list, vm); - error->active_bo[ndx] = active_bo; - error->pinned_bo[ndx] = pinned_bo; + count = capture_error_bo(active_bo, count, &vm->active_list, false); + else + count = 0; + + error->active_vm[idx] = vm; + error->active_bo[idx] = active_bo; + error->active_bo_count[idx] = count; } -static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error) +static void i915_capture_active_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) { - struct i915_address_space *vm; - int cnt = 0, i = 0; - - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - cnt++; - - error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); - error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); - error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), - GFP_ATOMIC); - error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), - GFP_ATOMIC); - - if (error->active_bo == NULL || - error->pinned_bo == NULL || - error->active_bo_count == NULL || - error->pinned_bo_count == NULL) { - kfree(error->active_bo); - kfree(error->active_bo_count); - kfree(error->pinned_bo); - kfree(error->pinned_bo_count); - - error->active_bo = NULL; - error->active_bo_count = NULL; - error->pinned_bo = NULL; - error->pinned_bo_count = NULL; - } else { - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - i915_gem_capture_vm(dev_priv, error, vm, i++); + int cnt = 0, i, j; + + BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo)); + BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm)); + BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count)); + + /* Scan each engine looking for unique active contexts/vm */ + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + struct drm_i915_error_engine *ee = &error->engine[i]; + 
bool found; + + if (!ee->vm) + continue; - error->vm_count = cnt; + found = false; + for (j = 0; j < i && !found; j++) + found = error->engine[j].vm == ee->vm; + if (!found) + i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++); } } +static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct i915_address_space *vm = &dev_priv->ggtt.base; + struct drm_i915_error_buffer *bo; + struct i915_vma *vma; + int count_inactive, count_active; + + count_inactive = 0; + list_for_each_entry(vma, &vm->active_list, vm_link) + count_inactive++; + + count_active = 0; + list_for_each_entry(vma, &vm->inactive_list, vm_link) + count_active++; + + bo = NULL; + if (count_inactive + count_active) + bo = kcalloc(count_inactive + count_active, + sizeof(*bo), GFP_ATOMIC); + if (!bo) + return; + + count_inactive = capture_error_bo(bo, count_inactive, + &vm->active_list, true); + count_active = capture_error_bo(bo + count_inactive, count_active, + &vm->inactive_list, true); + error->pinned_bo_count = count_inactive + count_active; + error->pinned_bo = bo; +} + /* Capture all registers which don't fit into another category. */ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) @@ -1403,6 +1396,10 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, #endif error->reset_count = i915_reset_count(&dev_priv->gpu_error); error->suspend_count = dev_priv->suspend_count; + + memcpy(&error->device_info, + INTEL_INFO(dev_priv), + sizeof(error->device_info)); } /** @@ -1436,9 +1433,10 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); - i915_gem_capture_buffers(dev_priv, error); i915_gem_record_fences(dev_priv, error); i915_gem_record_rings(dev_priv, error); + i915_capture_active_buffers(dev_priv, error); + i915_capture_pinned_buffers(dev_priv, error); do_gettimeofday(&error->time); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 03a5cef..e436941 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -183,7 +183,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc, struct i915_guc_client *client, u16 new_id) { - struct sg_table *sg = guc->ctx_pool_obj->pages; + struct sg_table *sg = guc->ctx_pool_vma->pages; void *doorbell_bitmap = guc->doorbell_bitmap; struct guc_doorbell_info *doorbell; struct guc_context_desc desc; @@ -325,7 +325,6 @@ static void guc_init_proc_desc(struct intel_guc *guc, static void guc_init_ctx_desc(struct intel_guc *guc, struct i915_guc_client *client) { - struct drm_i915_gem_object *client_obj = client->client_obj; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; struct i915_gem_context *ctx = client->owner; @@ -340,10 +339,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_engine(engine, dev_priv) { + for_each_engine_masked(engine, dev_priv, client->engines) { struct intel_context *ce = &ctx->engine[engine->id]; - struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; - struct drm_i915_gem_object *obj; + uint32_t guc_engine_id = engine->guc_id; + struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id]; /* TODO: We have a design issue to be solved here. 
Only when we * receive the first batch, we know which engine is used by the @@ -358,30 +357,29 @@ static void guc_init_ctx_desc(struct intel_guc *guc, lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - gfx_addr = i915_gem_obj_ggtt_offset(ce->state); - lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; + lrc->ring_lcra = + i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | - (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - - obj = ce->ring->obj; - gfx_addr = i915_gem_obj_ggtt_offset(obj); + (guc_engine_id << GUC_ELC_ENGINE_OFFSET); - lrc->ring_begin = gfx_addr; - lrc->ring_end = gfx_addr + obj->base.size - 1; - lrc->ring_next_free_location = gfx_addr; + lrc->ring_begin = i915_ggtt_offset(ce->ring->vma); + lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; + lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << engine->guc_id); + desc.engines_used |= (1 << guc_engine_id); } + DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", + client->engines, desc.engines_used); WARN_ON(desc.engines_used == 0); /* * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT */ - gfx_addr = i915_gem_obj_ggtt_offset(client_obj); - desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) + + gfx_addr = i915_ggtt_offset(client->vma); + desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + client->doorbell_offset; desc.db_trigger_cpu = (uintptr_t)client->client_base + client->doorbell_offset; @@ -397,7 +395,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.desc_private = (uintptr_t)client; /* Pool context is pinned already */ - sg = guc->ctx_pool_obj->pages; + sg = guc->ctx_pool_vma->pages; sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), sizeof(desc) * client->ctx_index); } @@ -410,7 +408,7 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, memset(&desc, 0, sizeof(desc)); - sg = guc->ctx_pool_obj->pages; + sg = guc->ctx_pool_vma->pages; sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), sizeof(desc) * client->ctx_index); } @@ -457,6 +455,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc, /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); const u32 wqi_len = wqi_size/sizeof(u32) - 1; + struct intel_engine_cs *engine = rq->engine; struct guc_process_desc *desc; struct guc_wq_item *wqi; void *base; @@ -492,18 +491,17 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc, /* WQ starts from the page after doorbell / process_desc */ wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT; wq_off &= PAGE_SIZE - 1; - base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page)); + base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page)); wqi = (struct guc_wq_item *)((char *)base + wq_off); /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | (wqi_len << WQ_LEN_SHIFT) | - (rq->engine->guc_id << WQ_TARGET_SHIFT) | + (engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, - rq->engine); + wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = rq->fence.seqno; @@ -611,55 
+609,48 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq) */ /** - * gem_allocate_guc_obj() - Allocate gem object for GuC usage - * @dev_priv: driver private data structure - * @size: size of object + * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage + * @guc: the guc + * @size: size of area to allocate (both virtual space and memory) * - * This is a wrapper to create a gem obj. In order to use it inside GuC, the - * object needs to be pinned lifetime. Also we must pin it to gtt space other - * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC. + * This is a wrapper to create an object for use with the GuC. In order to + * use it inside the GuC, an object needs to be pinned for its lifetime, so + * we allocate both some backing storage and a range inside the Global GTT. + * We must pin it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because + * that range is reserved inside GuC. * - * Return: A drm_i915_gem_object if successful, otherwise NULL. + * Return: An i915_vma if successful, otherwise an ERR_PTR. */ -static struct drm_i915_gem_object * -gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size) +static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) - return NULL; + return ERR_CAST(obj); - if (i915_gem_object_get_pages(obj)) { - i915_gem_object_put(obj); - return NULL; - } + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err; - if (i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) { - i915_gem_object_put(obj); - return NULL; + ret = i915_vma_pin(vma, 0, PAGE_SIZE, + PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + if (ret) { + vma = ERR_PTR(ret); + goto err; } /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); - return obj; -} - -/** - * gem_release_guc_obj() - Release gem object allocated for GuC usage - * @obj: gem obj to be released - */ -static void gem_release_guc_obj(struct drm_i915_gem_object *obj) -{ - if (!obj) - return; - - if (i915_gem_obj_is_pinned(obj)) - i915_gem_object_ggtt_unpin(obj); + return vma; +err: i915_gem_object_put(obj); + return vma; } static void @@ -686,7 +677,7 @@ guc_client_free(struct drm_i915_private *dev_priv, kunmap(kmap_to_page(client->client_base)); } - gem_release_guc_obj(client->client_obj); + i915_vma_unpin_and_release(&client->vma); if (client->ctx_index != GUC_INVALID_CTX_ID) { guc_fini_ctx_desc(guc, client); @@ -696,29 +687,47 @@ guc_client_free(struct drm_i915_private *dev_priv, kfree(client); } +/* Check that a doorbell register is in the expected state */ +static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + i915_reg_t drbreg = GEN8_DRBREGL(db_id); + uint32_t value = I915_READ(drbreg); + bool enabled = (value & GUC_DOORBELL_ENABLED) != 0; + bool expected = test_bit(db_id, guc->doorbell_bitmap); + + if (enabled == expected) + return true; + + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n", + db_id, drbreg.reg, value, + expected ?
"active" : "inactive"); + + return false; +} + /* - * Borrow the first client to set up & tear down every doorbell + * Borrow the first client to set up & tear down each unused doorbell * in turn, to ensure that all doorbell h/w is (re)initialised. */ static void guc_init_doorbell_hw(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); struct i915_guc_client *client = guc->execbuf_client; - uint16_t db_id, i; - int err; + uint16_t db_id; + int i, err; + /* Save client's original doorbell selection */ db_id = client->doorbell_id; for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { - i915_reg_t drbreg = GEN8_DRBREGL(i); - u32 value = I915_READ(drbreg); + /* Skip if doorbell is OK */ + if (guc_doorbell_check(guc, i)) + continue; err = guc_update_doorbell_id(guc, client, i); - - /* Report update failure or unexpectedly active doorbell */ - if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED))) - DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n", - i, drbreg.reg, value, err); + if (err) + DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n", + i, err); } /* Restore to original value */ @@ -727,20 +736,15 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) DRM_ERROR("Failed to restore doorbell to %d, err %d\n", db_id, err); - for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { - i915_reg_t drbreg = GEN8_DRBREGL(i); - u32 value = I915_READ(drbreg); - - if (i != db_id && (value & GUC_DOORBELL_ENABLED)) - DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n", - i, drbreg.reg, value); - - } + /* Read back & verify all doorbell registers */ + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) + (void)guc_doorbell_check(guc, i); } /** * guc_client_alloc() - Allocate an i915_guc_client * @dev_priv: driver private data structure + * @engines: The set of engines to enable for this client * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, @@ -752,22 +756,24 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) */ static struct i915_guc_client * guc_client_alloc(struct drm_i915_private *dev_priv, + uint32_t engines, uint32_t priority, struct i915_gem_context *ctx) { struct i915_guc_client *client; struct intel_guc *guc = &dev_priv->guc; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; uint16_t db_id; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return NULL; - client->doorbell_id = GUC_INVALID_DOORBELL_ID; - client->priority = priority; client->owner = ctx; client->guc = guc; + client->engines = engines; + client->priority = priority; + client->doorbell_id = GUC_INVALID_DOORBELL_ID; client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0, GUC_MAX_GPU_CONTEXTS, GFP_KERNEL); @@ -777,13 +783,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv, } /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE); - if (!obj) + vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); + if (IS_ERR(vma)) goto err; /* We'll keep just the first (doorbell/proc) page permanently kmap'd. 
*/ - client->client_obj = obj; - client->client_base = kmap(i915_gem_object_get_page(obj, 0)); + client->vma = vma; + client->client_base = kmap(i915_vma_first_page(vma)); client->wq_offset = GUC_DB_SIZE; client->wq_size = GUC_WQ_SIZE; @@ -809,8 +815,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv, if (guc_init_doorbell(guc, client, db_id)) goto err; - DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n", - priority, client, client->ctx_index); + DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", + priority, client, client->engines, client->ctx_index); DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", client->doorbell_id, client->doorbell_offset); @@ -825,8 +831,7 @@ err: static void guc_create_log(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_i915_gem_object *obj; + struct i915_vma *vma; unsigned long offset; uint32_t size, flags; @@ -842,16 +847,16 @@ static void guc_create_log(struct intel_guc *guc) GUC_LOG_ISR_PAGES + 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; - obj = guc->log_obj; - if (!obj) { - obj = gem_allocate_guc_obj(dev_priv, size); - if (!obj) { + vma = guc->log_vma; + if (!vma) { + vma = guc_allocate_vma(guc, size); + if (IS_ERR(vma)) { /* logging will be off */ i915.guc_log_level = -1; return; } - guc->log_obj = obj; + guc->log_vma = vma; } /* each allocated unit is a page */ @@ -860,7 +865,7 @@ static void guc_create_log(struct intel_guc *guc) (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); - offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */ + offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; } @@ -889,7 +894,7 @@ static void init_guc_policies(struct guc_policies *policies) static void guc_create_ads(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_i915_gem_object *obj; + struct i915_vma *vma; struct guc_ads *ads; struct guc_policies *policies; struct guc_mmio_reg_state *reg_state; @@ -902,16 +907,16 @@ static void guc_create_ads(struct intel_guc *guc) sizeof(struct guc_mmio_reg_state) + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE; - obj = guc->ads_obj; - if (!obj) { - obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size)); - if (!obj) + vma = guc->ads_vma; + if (!vma) { + vma = guc_allocate_vma(guc, PAGE_ALIGN(size)); + if (IS_ERR(vma)) return; - guc->ads_obj = obj; + guc->ads_vma = vma; } - page = i915_gem_object_get_page(obj, 0); + page = i915_vma_first_page(vma); ads = kmap(page); /* @@ -922,7 +927,7 @@ static void guc_create_ads(struct intel_guc *guc) * to find it. 
*/ engine = &dev_priv->engine[RCS]; - ads->golden_context_lrca = engine->status_page.gfx_addr; + ads->golden_context_lrca = engine->status_page.ggtt_offset; for_each_engine(engine, dev_priv) ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine); @@ -931,8 +936,8 @@ static void guc_create_ads(struct intel_guc *guc) policies = (void *)ads + sizeof(struct guc_ads); init_guc_policies(policies); - ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) + - sizeof(struct guc_ads); + ads->scheduler_policies = + i915_ggtt_offset(vma) + sizeof(struct guc_ads); /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); @@ -960,10 +965,9 @@ static void guc_create_ads(struct intel_guc *guc) */ int i915_guc_submission_init(struct drm_i915_private *dev_priv) { - const size_t ctxsize = sizeof(struct guc_context_desc); - const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; - const size_t gemsize = round_up(poolsize, PAGE_SIZE); struct intel_guc *guc = &dev_priv->guc; + struct i915_vma *vma; + u32 size; /* Wipe bitmap & delete client in case of reinitialisation */ bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); @@ -972,13 +976,15 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) if (!i915.enable_guc_submission) return 0; /* not enabled */ - if (guc->ctx_pool_obj) + if (guc->ctx_pool_vma) return 0; /* already allocated */ - guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize); - if (!guc->ctx_pool_obj) - return -ENOMEM; + size = PAGE_ALIGN(GUC_MAX_GPU_CONTEXTS*sizeof(struct guc_context_desc)); + vma = guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + guc->ctx_pool_vma = vma; ida_init(&guc->ctx_ids); guc_create_log(guc); guc_create_ads(guc); @@ -994,6 +1000,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) /* client for execbuf submission */ client = guc_client_alloc(dev_priv, + INTEL_INFO(dev_priv)->ring_mask, GUC_CTX_PRIORITY_KMD_NORMAL, dev_priv->kernel_context); if (!client) { @@ -1030,16 +1037,12 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; - gem_release_guc_obj(dev_priv->guc.ads_obj); - guc->ads_obj = NULL; - - gem_release_guc_obj(dev_priv->guc.log_obj); - guc->log_obj = NULL; + i915_vma_unpin_and_release(&guc->ads_vma); + i915_vma_unpin_and_release(&guc->log_vma); - if (guc->ctx_pool_obj) + if (guc->ctx_pool_vma) ida_destroy(&guc->ctx_ids); - gem_release_guc_obj(guc->ctx_pool_obj); - guc->ctx_pool_obj = NULL; + i915_vma_unpin_and_release(&guc->ctx_pool_vma); } /** @@ -1062,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev) /* any value greater than GUC_POWER_D0 */ data[1] = GUC_POWER_D1; /* first page is shared data with GuC */ - data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); + data[2] = i915_ggtt_offset(ctx->engine[RCS].state); return host2guc_action(guc, data, ARRAY_SIZE(data)); } @@ -1087,7 +1090,7 @@ int intel_guc_resume(struct drm_device *dev) data[0] = HOST2GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; /* first page is shared data with GuC */ - data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); + data[2] = i915_ggtt_offset(ctx->engine[RCS].state); return host2guc_action(guc, data, ARRAY_SIZE(data)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 591f452..ebb83d5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -972,10 +972,8 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 
static void notify_ring(struct intel_engine_cs *engine) { smp_store_mb(engine->breadcrumbs.irq_posted, true); - if (intel_engine_wakeup(engine)) { + if (intel_engine_wakeup(engine)) trace_i915_gem_request_notify(engine); - engine->breadcrumbs.irq_wakeups++; - } } static void vlv_c0_read(struct drm_i915_private *dev_priv, @@ -3044,22 +3042,6 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } -static unsigned long kick_waiters(struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = engine->i915; - unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); - - if (engine->hangcheck.user_interrupts == irq_count && - !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { - if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) - DRM_ERROR("Hangcheck timer elapsed... %s idle\n", - engine->name); - - intel_engine_enable_fake_irq(engine); - } - - return irq_count; -} /* * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. We keep track per ring seqno progress and @@ -3097,7 +3079,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) bool busy = intel_engine_has_waiter(engine); u64 acthd; u32 seqno; - unsigned user_interrupts; semaphore_clear_deadlocks(dev_priv); @@ -3114,15 +3095,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) acthd = intel_engine_get_active_head(engine); seqno = intel_engine_get_seqno(engine); - /* Reset stuck interrupts between batch advances */ - user_interrupts = 0; - if (engine->hangcheck.seqno == seqno) { if (!intel_engine_is_active(engine)) { engine->hangcheck.action = HANGCHECK_IDLE; if (busy) { /* Safeguard against driver failure */ - user_interrupts = kick_waiters(engine); engine->hangcheck.score += BUSY; } } else { @@ -3185,7 +3162,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) engine->hangcheck.seqno = seqno; engine->hangcheck.acthd = acthd; - engine->hangcheck.user_interrupts = user_interrupts; busy_count += busy; } diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c new file mode 100644 index 0000000..49a0794 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -0,0 +1,101 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include <linux/kernel.h> +#include <asm/fpu/api.h> + +#include "i915_drv.h" + +static DEFINE_STATIC_KEY_FALSE(has_movntdqa); + +#ifdef CONFIG_AS_MOVNTDQA +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) +{ + kernel_fpu_begin(); + + len >>= 4; + while (len >= 4) { + asm("movntdqa (%0), %%xmm0\n" + "movntdqa 16(%0), %%xmm1\n" + "movntdqa 32(%0), %%xmm2\n" + "movntdqa 48(%0), %%xmm3\n" + "movaps %%xmm0, (%1)\n" + "movaps %%xmm1, 16(%1)\n" + "movaps %%xmm2, 32(%1)\n" + "movaps %%xmm3, 48(%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 64; + dst += 64; + len -= 4; + } + while (len--) { + asm("movntdqa (%0), %%xmm0\n" + "movaps %%xmm0, (%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 16; + dst += 16; + } + + kernel_fpu_end(); +} +#endif + +/** + * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC + * @dst: destination pointer + * @src: source pointer + * @len: how many bytes to copy + * + * i915_memcpy_from_wc copies @len bytes from @src to @dst using + * non-temporal instructions where available. Note that all arguments + * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple + * of 16. + * + * To test whether accelerated reads from WC are supported, use + * i915_memcpy_from_wc(NULL, NULL, 0); + * + * Returns true if the copy was successful, false if the preconditions + * are not met. + */ +bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len) +{ + if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15)) + return false; + +#ifdef CONFIG_AS_MOVNTDQA + if (static_branch_likely(&has_movntdqa)) { + if (likely(len)) + __memcpy_ntdqa(dst, src, len); + return true; + } +#endif + + return false; +} + +void i915_memcpy_init_early(struct drm_i915_private *dev_priv) +{ + if (static_cpu_has(X86_FEATURE_XMM4_1)) + static_branch_enable(&has_movntdqa); +} diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c new file mode 100644 index 0000000..e4935dd --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -0,0 +1,84 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
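[Aside: a usage sketch for the helper just added, following its kernel-doc; the caller name is illustrative. A zero-length call, i915_memcpy_from_wc(NULL, NULL, 0), probes for MOVNTDQA support up front, and the helper also returns false whenever dst, src and len are not all 16-byte aligned/multiples, so a fallback path is always needed:]

	#include <linux/string.h>
	#include "i915_drv.h"

	static void read_from_wc(void *dst, const void *src, unsigned long len)
	{
		if (!i915_memcpy_from_wc(dst, src, len))
			memcpy(dst, src, len); /* ordinary cached reads */
	}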
+ * + */ + +#include <linux/mm.h> +#include <linux/io-mapping.h> + +#include <asm/pgtable.h> + +#include "i915_drv.h" + +struct remap_pfn { + struct mm_struct *mm; + unsigned long pfn; + pgprot_t prot; +}; + +static int remap_pfn(pte_t *pte, pgtable_t token, + unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTEs are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + +/** + * remap_io_mapping - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @iomap: the source io_mapping + * + * Note: this is only safe if the mm semaphore is held when called. + */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + struct remap_pfn r; + int err; + + GEM_BUG_ON((vma->vm_flags & + (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) != + (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)); + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + r.mm = vma->vm_mm; + r.pfn = pfn; + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); + + err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index b6e404c..768ad89 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -45,6 +45,7 @@ struct i915_params i915 __read_mostly = { .fastboot = 0, .prefault_disable = 0, .load_detect_test = 0, + .force_reset_modeset_test = 0, .reset = true, .invert_brightness = 0, .disable_display = 0, @@ -161,6 +162,11 @@ MODULE_PARM_DESC(load_detect_test, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); +module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); +MODULE_PARM_DESC(force_reset_modeset_test, + "Force a modeset during gpu reset for testing (default:false).
" + "For developers only."); + module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 0ad020b..3a0dd78 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -57,6 +57,7 @@ struct i915_params { bool fastboot; bool prefault_disable; bool load_detect_test; + bool force_reset_modeset_test; bool reset; bool disable_display; bool verbose_state_checks; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f38a5e2..d4adf28 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3660,8 +3660,17 @@ enum { #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) /* Panel power sequencing */ -#define PP_STATUS _MMIO(0x61200) -#define PP_ON (1 << 31) +#define PPS_BASE 0x61200 +#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) +#define PCH_PPS_BASE 0xC7200 + +#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \ + PPS_BASE + (reg) + \ + (pps_idx) * 0x100) + +#define _PP_STATUS 0x61200 +#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) +#define PP_ON (1 << 31) /* * Indicates that all dependencies of the panel are on: * @@ -3669,14 +3678,14 @@ enum { * - pipe enabled * - LVDS/DVOB/DVOC on */ -#define PP_READY (1 << 30) -#define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_POWER_UP (1 << 28) -#define PP_SEQUENCE_POWER_DOWN (2 << 28) -#define PP_SEQUENCE_MASK (3 << 28) -#define PP_SEQUENCE_SHIFT 28 -#define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_MASK 0x0000000f +#define PP_READY (1 << 30) +#define PP_SEQUENCE_NONE (0 << 28) +#define PP_SEQUENCE_POWER_UP (1 << 28) +#define PP_SEQUENCE_POWER_DOWN (2 << 28) +#define PP_SEQUENCE_MASK (3 << 28) +#define PP_SEQUENCE_SHIFT 28 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) @@ -3686,11 +3695,46 @@ enum { #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) #define PP_SEQUENCE_STATE_RESET (0xf << 0) -#define PP_CONTROL _MMIO(0x61204) -#define POWER_TARGET_ON (1 << 0) -#define PP_ON_DELAYS _MMIO(0x61208) -#define PP_OFF_DELAYS _MMIO(0x6120c) -#define PP_DIVISOR _MMIO(0x61210) + +#define _PP_CONTROL 0x61204 +#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) +#define PANEL_UNLOCK_REGS (0xabcd << 16) +#define PANEL_UNLOCK_MASK (0xffff << 16) +#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0 +#define BXT_POWER_CYCLE_DELAY_SHIFT 4 +#define EDP_FORCE_VDD (1 << 3) +#define EDP_BLC_ENABLE (1 << 2) +#define PANEL_POWER_RESET (1 << 1) +#define PANEL_POWER_OFF (0 << 0) +#define PANEL_POWER_ON (1 << 0) + +#define _PP_ON_DELAYS 0x61208 +#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) +#define PANEL_PORT_SELECT_SHIFT 30 +#define PANEL_PORT_SELECT_MASK (3 << 30) +#define PANEL_PORT_SELECT_LVDS (0 << 30) +#define PANEL_PORT_SELECT_DPA (1 << 30) +#define PANEL_PORT_SELECT_DPC (2 << 30) +#define PANEL_PORT_SELECT_DPD (3 << 30) +#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) +#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000 +#define PANEL_POWER_UP_DELAY_SHIFT 16 +#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff +#define PANEL_LIGHT_ON_DELAY_SHIFT 0 + +#define _PP_OFF_DELAYS 0x6120C +#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) +#define 
PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000 +#define PANEL_POWER_DOWN_DELAY_SHIFT 16 +#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff +#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 + +#define _PP_DIVISOR 0x61210 +#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) +#define PP_REFERENCE_DIVIDER_MASK 0xffffff00 +#define PP_REFERENCE_DIVIDER_SHIFT 8 +#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f +#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 /* Panel fitting */ #define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230) @@ -6750,77 +6794,6 @@ enum { #define PCH_LVDS _MMIO(0xe1180) #define LVDS_DETECTED (1 << 1) -/* vlv has 2 sets of panel control regs. */ -#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) -#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) -#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) -#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) -#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) -#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) - -#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) -#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) -#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) -#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) -#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) - -#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS) -#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL) -#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS) -#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS) -#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR) - -#define _PCH_PP_STATUS 0xc7200 -#define _PCH_PP_CONTROL 0xc7204 -#define PANEL_UNLOCK_REGS (0xabcd << 16) -#define PANEL_UNLOCK_MASK (0xffff << 16) -#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0) -#define BXT_POWER_CYCLE_DELAY_SHIFT 4 -#define EDP_FORCE_VDD (1 << 3) -#define EDP_BLC_ENABLE (1 << 2) -#define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_OFF (0 << 0) -#define PANEL_POWER_ON (1 << 0) -#define _PCH_PP_ON_DELAYS 0xc7208 -#define PANEL_PORT_SELECT_MASK (3 << 30) -#define PANEL_PORT_SELECT_LVDS (0 << 30) -#define PANEL_PORT_SELECT_DPA (1 << 30) -#define PANEL_PORT_SELECT_DPC (2 << 30) -#define PANEL_PORT_SELECT_DPD (3 << 30) -#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_UP_DELAY_SHIFT 16 -#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_ON_DELAY_SHIFT 0 - -#define _PCH_PP_OFF_DELAYS 0xc720c -#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_DOWN_DELAY_SHIFT 16 -#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 - -#define _PCH_PP_DIVISOR 0xc7210 -#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) -#define PP_REFERENCE_DIVIDER_SHIFT 8 -#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) -#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 - -#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS) -#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL) -#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS) -#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS) -#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR) - -/* BXT PPS changes - 2nd set of PPS registers */ -#define _BXT_PP_STATUS2 0xc7300 -#define _BXT_PP_CONTROL2 0xc7304 -#define _BXT_PP_ON_DELAYS2 0xc7308 -#define _BXT_PP_OFF_DELAYS2 0xc730c - -#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2) -#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, 
_BXT_PP_CONTROL2) -#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) -#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) - #define _PCH_DP_B 0xe4100 #define PCH_DP_B _MMIO(_PCH_DP_B) #define _PCH_DPB_AUX_CH_CTL 0xe4110 @@ -7063,12 +7036,13 @@ enum { #define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) #define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) #define GEN6_RP_CUR_UP_EI _MMIO(0xA050) -#define GEN6_CURICONT_MASK 0xffffff +#define GEN6_RP_EI_MASK 0xffffff +#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK #define GEN6_RP_CUR_UP _MMIO(0xA054) -#define GEN6_CURBSYTAVG_MASK 0xffffff +#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK #define GEN6_RP_PREV_UP _MMIO(0xA058) #define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C) -#define GEN6_CURIAVG_MASK 0xffffff +#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK #define GEN6_RP_CUR_DOWN _MMIO(0xA060) #define GEN6_RP_PREV_DOWN _MMIO(0xA064) #define GEN6_RP_UP_EI _MMIO(0xA068) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5cfe4c7..4f27277 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -37,25 +37,6 @@ static void i915_save_display(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 4) dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - /* LVDS state */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); - else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->regfile.saveLVDS = I915_READ(LVDS); - - /* Panel power sequencer */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else if (INTEL_INFO(dev)->gen <= 4) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); - } - /* save FBC interval */ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); @@ -64,33 +45,11 @@ static void i915_save_display(struct drm_device *dev) static void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - u32 mask = 0xffffffff; /* Display arbitration */ if (INTEL_INFO(dev)->gen <= 4) I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - mask = ~LVDS_PORT_EN; - - /* LVDS state */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask); - else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask); - - /* Panel power sequencer */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } else if (INTEL_INFO(dev)->gen <= 4) { - I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } - /* only restore FBC info 
on the platform that supports FBC*/ intel_fbc_global_disable(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 9086744..2491e4c 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -26,6 +26,40 @@ #include "i915_drv.h" +static void intel_breadcrumbs_hangcheck(unsigned long data) +{ + struct intel_engine_cs *engine = (struct intel_engine_cs *)data; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + if (!b->irq_enabled) + return; + + if (time_before(jiffies, b->timeout)) { + mod_timer(&b->hangcheck, b->timeout); + return; + } + + DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name); + set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); + + /* Ensure that even if the GPU hangs, we get woken up. + * + * However, note that if no one is waiting, we never notice + * a gpu hang. Eventually, we will have to wait for a resource + * held by the GPU and so trigger a hangcheck. In the most + * pathological case, this will be upon memory starvation! To + * prevent this, we also queue the hangcheck from the retire + * worker. + */ + i915_queue_hangcheck(engine->i915); +} + +static unsigned long wait_timeout(void) +{ + return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES); +} + static void intel_breadcrumbs_fake_irq(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; @@ -37,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data) * every jiffie in order to kick the oldest waiter to do the * coherent seqno check. */ - rcu_read_lock(); if (intel_engine_wakeup(engine)) mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); - rcu_read_unlock(); } static void irq_enable(struct intel_engine_cs *engine) @@ -51,13 +83,6 @@ static void irq_enable(struct intel_engine_cs *engine) */ engine->breadcrumbs.irq_posted = true; - /* Make sure the current hangcheck doesn't falsely accuse a just - * started irq handler from missing an interrupt (because the - * interrupt count still matches the stale value from when - * the irq handler was disabled, many hangchecks ago). - */ - engine->breadcrumbs.irq_wakeups++; - spin_lock_irq(&engine->i915->irq_lock); engine->irq_enable(engine); spin_unlock_irq(&engine->i915->irq_lock); @@ -98,17 +123,13 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) } if (!b->irq_enabled || - test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) + test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { mod_timer(&b->fake_irq, jiffies + 1); - - /* Ensure that even if the GPU hangs, we get woken up. - * - * However, note that if no one is waiting, we never notice - * a gpu hang. Eventually, we will have to wait for a resource - * held by the GPU and so trigger a hangcheck. In the most - * pathological case, this will be upon memory starvation! 
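A note on the hangcheck timer added at the top of this intel_breadcrumbs.c hunk: it is a self-rearming watchdog. As long as waiters keep pushing b->timeout forward, the callback merely re-queues itself for the new deadline; only a deadline that genuinely expires flags a missed interrupt and kicks the fake-irq poller. A minimal sketch of that shape, assuming the 4.8-era timer API (setup_timer()/mod_timer() with an unsigned long cookie); struct watchdog and its fields are hypothetical stand-ins:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct watchdog {			/* hypothetical stand-in for intel_breadcrumbs */
	struct timer_list timer;
	unsigned long deadline;		/* round_jiffies_up(jiffies + timeout) when (re)armed */
	bool armed;
};

static void watchdog_fn(unsigned long data)
{
	struct watchdog *w = (struct watchdog *)data;

	if (!w->armed)
		return;

	if (time_before(jiffies, w->deadline)) {
		/* Deadline moved out after we were queued: chase it. */
		mod_timer(&w->timer, w->deadline);
		return;
	}

	/* Deadline expired with no interrupt seen: fall back to polling. */
}

The patch arms it with the same pairing: setup_timer() at engine init, and mod_timer(&b->hangcheck, b->timeout) whenever the interrupt is enabled.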
- */ - i915_queue_hangcheck(i915); + } else { + /* Ensure we never sleep indefinitely */ + GEM_BUG_ON(!time_after(b->timeout, jiffies)); + mod_timer(&b->hangcheck, b->timeout); + } } static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) @@ -211,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, } rb_link_node(&wait->node, parent, p); rb_insert_color(&wait->node, &b->waiters); - GEM_BUG_ON(!first && !b->irq_seqno_bh); + GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh)); if (completed) { struct rb_node *next = rb_next(completed); @@ -219,8 +240,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, GEM_BUG_ON(!next && !first); if (next && next != &wait->node) { GEM_BUG_ON(first); + b->timeout = wait_timeout(); b->first_wait = to_wait(next); - smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk); /* As there is a delay between reading the current * seqno, processing the completed tasks and selecting * the next waiter, we may have missed the interrupt @@ -245,8 +267,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, if (first) { GEM_BUG_ON(rb_first(&b->waiters) != &wait->node); + b->timeout = wait_timeout(); b->first_wait = wait; - smp_store_mb(b->irq_seqno_bh, wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, wait->tsk); /* After assigning ourselves as the new bottom-half, we must * perform a cursory check to prevent a missed interrupt. * Either we miss the interrupt whilst programming the hardware, @@ -257,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, */ __intel_breadcrumbs_enable_irq(b); } - GEM_BUG_ON(!b->irq_seqno_bh); + GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh)); GEM_BUG_ON(!b->first_wait); GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node); @@ -277,11 +300,6 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine, return first; } -void intel_engine_enable_fake_irq(struct intel_engine_cs *engine) -{ - mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); -} - static inline bool chain_wakeup(struct rb_node *rb, int priority) { return rb && to_wait(rb)->tsk->prio <= priority; @@ -317,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, const int priority = wakeup_priority(b, wait->tsk); struct rb_node *next; - GEM_BUG_ON(b->irq_seqno_bh != wait->tsk); + GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk); /* We are the current bottom-half. Find the next candidate, * the first waiter in the queue on the remaining oldest @@ -359,14 +377,15 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, * the interrupt, or if we have to handle an * exception rather than a seqno completion. */ + b->timeout = wait_timeout(); b->first_wait = to_wait(next); - smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk); if (b->first_wait->seqno != wait->seqno) __intel_breadcrumbs_enable_irq(b); - wake_up_process(b->irq_seqno_bh); + wake_up_process(b->first_wait->tsk); } else { b->first_wait = NULL; - WRITE_ONCE(b->irq_seqno_bh, NULL); + rcu_assign_pointer(b->irq_seqno_bh, NULL); __intel_breadcrumbs_disable_irq(b); } } else { @@ -380,7 +399,7 @@ out_unlock: GEM_BUG_ON(b->first_wait == wait); GEM_BUG_ON(rb_first(&b->waiters) != (b->first_wait ? 
&b->first_wait->node : NULL)); - GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters)); + GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters)); spin_unlock(&b->lock); } @@ -536,6 +555,9 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) setup_timer(&b->fake_irq, intel_breadcrumbs_fake_irq, (unsigned long)engine); + setup_timer(&b->hangcheck, + intel_breadcrumbs_hangcheck, + (unsigned long)engine); /* Spawn a thread to provide a common bottom-half for all signals. * As this is an asynchronous interface we cannot steal the current @@ -560,6 +582,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) if (!IS_ERR_OR_NULL(b->signaler)) kthread_stop(b->signaler); + del_timer_sync(&b->hangcheck); del_timer_sync(&b->fake_irq); } @@ -573,11 +596,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915) * RCU lock, i.e. as we call wake_up_process() we must be holding the * rcu_read_lock(). */ - rcu_read_lock(); for_each_engine(engine, i915) if (unlikely(intel_engine_wakeup(engine))) mask |= intel_engine_flag(engine); - rcu_read_unlock(); return mask; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 54bbb9c..d224f64 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1202,8 +1202,8 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, if (HAS_PCH_SPLIT(dev)) { u32 port_sel; - pp_reg = PCH_PP_CONTROL; - port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; + pp_reg = PP_CONTROL(0); + port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; if (port_sel == PANEL_PORT_SELECT_LVDS && I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) @@ -1211,10 +1211,10 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, /* XXX: else fix for eDP */ } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* presumably write lock depends on pipe, not port select */ - pp_reg = VLV_PIPE_PP_CONTROL(pipe); + pp_reg = PP_CONTROL(pipe); panel_pipe = pipe; } else { - pp_reg = PP_CONTROL; + pp_reg = PP_CONTROL(0); if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; } @@ -1959,12 +1959,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. 
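Back in the breadcrumbs hunk, the irq_seqno_bh conversion is the classic RCU publish/consume pattern: the waiter tree publishes the new bottom-half task with rcu_assign_pointer() under b->lock, and the wakeup side dereferences it inside an RCU read-side critical section, which is safe because task_structs are RCU-freed. A minimal sketch; bh_holder and both helpers are hypothetical names:

#include <linux/rcupdate.h>
#include <linux/sched.h>

struct bh_holder {				/* hypothetical */
	struct task_struct __rcu *tsk;
};

static void bh_publish(struct bh_holder *h, struct task_struct *tsk)
{
	rcu_assign_pointer(h->tsk, tsk);	/* writer side, under the owner's lock */
}

static void bh_wakeup(struct bh_holder *h)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = rcu_dereference(h->tsk);
	if (tsk)
		wake_up_process(tsk);		/* task cannot be freed inside this section */
	rcu_read_unlock();
}

This also explains why the explicit rcu_read_lock()/rcu_read_unlock() pairs vanish from the callers in this series: presumably intel_engine_wakeup() now takes the read lock itself.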
*/ - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH_DISPLAY(dev_priv)) { if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); - else { + } else { if (crtc->config->has_pch_encoder) { /* if driving the PCH, we need FDI enabled */ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); @@ -2147,33 +2147,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, } } -static void -intel_fill_fb_info(struct drm_i915_private *dev_priv, - struct drm_framebuffer *fb) -{ - struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info; - unsigned int tile_size, tile_width, tile_height, cpp; - - tile_size = intel_tile_size(dev_priv); - - cpp = drm_format_plane_cpp(fb->pixel_format, 0); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[0], cpp); - - info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp); - info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height); - - if (info->pixel_format == DRM_FORMAT_NV12) { - cpp = drm_format_plane_cpp(fb->pixel_format, 1); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[1], cpp); - - info->uv_offset = fb->offsets[1]; - info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp); - info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height); - } -} - static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2206,16 +2179,15 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv } } -int -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - unsigned int rotation) +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; u32 alignment; - int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -2240,75 +2212,112 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, */ intel_runtime_pm_get(dev_priv); - ret = i915_gem_object_pin_to_display_plane(obj, alignment, - &view); - if (ret) - goto err_pm; - - /* Install a fence for tiled scan-out. Pre-i965 always needs a - * fence, whereas 965+ only requires a fence if using - * framebuffer compression. For simplicity, we always install - * a fence as the cost is not that onerous. - */ - if (view.type == I915_GGTT_VIEW_NORMAL) { - ret = i915_gem_object_get_fence(obj); - if (ret == -EDEADLK) { - /* - * -EDEADLK means there are no free fences - * no pending flips. - * - * This is propagated to atomic, but it uses - * -EDEADLK to force a locking recovery, so - * change the returned error to -EBUSY. - */ - ret = -EBUSY; - goto err_unpin; - } else if (ret) - goto err_unpin; + vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); + if (IS_ERR(vma)) + goto err; - i915_gem_object_pin_fence(obj); + if (i915_vma_is_map_and_fenceable(vma)) { + /* Install a fence for tiled scan-out. Pre-i965 always needs a + * fence, whereas 965+ only requires a fence if using + * framebuffer compression. For simplicity, we always, when + * possible, install a fence as the cost is not that onerous. 
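For callers, the reworked intel_pin_and_fence_fb_obj() changes the contract: it returns a VMA (or an ERR_PTR) and treats the fence as purely opportunistic. A minimal caller sketch, using only the names this patch introduces:

struct i915_vma *vma;

vma = intel_pin_and_fence_fb_obj(fb, rotation);
if (IS_ERR(vma))
	return PTR_ERR(vma);		/* only failure to pin is fatal */

/* ... scan out; any fence that was grabbed rides along with the vma ... */

intel_unpin_fb_obj(fb, rotation);	/* drops the fence and the display pin together */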
+ * + * If we fail to fence the tiled scanout, then either the + * modeset will reject the change (which is highly unlikely as + * the affected systems, all but one, do not have unmappable + * space) or we will not be able to enable full powersaving + * techniques (also likely not to apply due to various limits + * FBC and the like impose on the size of the buffer, which + * presumably we violated anyway with this unmappable buffer). + * Anyway, it is presumably better to stumble onwards with + * something and try to run the system in a "less than optimal" + * mode that matches the user configuration. + */ + if (i915_vma_get_fence(vma) == 0) + i915_vma_pin_fence(vma); } +err: intel_runtime_pm_put(dev_priv); - return 0; - -err_unpin: - i915_gem_object_unpin_from_display_plane(obj, &view); -err_pm: - intel_runtime_pm_put(dev_priv); - return ret; + return vma; } void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); intel_fill_fb_ggtt_view(&view, fb, rotation); + vma = i915_gem_object_to_ggtt(obj, &view); - if (view.type == I915_GGTT_VIEW_NORMAL) - i915_gem_object_unpin_fence(obj); + i915_vma_unpin_fence(vma); + i915_gem_object_unpin_from_display_plane(vma); +} - i915_gem_object_unpin_from_display_plane(obj, &view); +static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + if (intel_rotation_90_or_270(rotation)) + return to_intel_framebuffer(fb)->rotated[plane].pitch; + else + return fb->pitches[plane]; +} + +/* + * Convert the x/y offsets into a linear offset. + * Only valid with 0/180 degree rotation, which is fine since linear + * offset is only used with linear buffers on pre-hsw and tiled buffers + * with gen2/3, and 90/270 degree rotations isn't supported on any of them. + */ +u32 intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + + return y * pitch + x * cpp; +} + +/* + * Add the x/y offsets derived from fb->offsets[] to the user + * specified plane src x/y offsets. The resulting x/y offsets + * specify the start of scanout from the beginning of the gtt mapping. + */ +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, + int plane) + +{ + const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); + unsigned int rotation = state->base.rotation; + + if (intel_rotation_90_or_270(rotation)) { + *x += intel_fb->rotated[plane].x; + *y += intel_fb->rotated[plane].y; + } else { + *x += intel_fb->normal[plane].x; + *y += intel_fb->normal[plane].y; + } } /* - * Adjust the tile offset by moving the difference into - * the x/y offsets. - * * Input tile dimensions and pitch must already be * rotated to match x and y, and in pixel units. 
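intel_fb_xy_to_linear() above is plain row-major addressing, and intel_fb_offset_to_xy() later in this file is its exact inverse. A worked example (xy_to_linear() is a hypothetical standalone copy, values chosen for illustration):

static u32 xy_to_linear(int x, int y, unsigned int pitch, unsigned int cpp)
{
	/* XRGB8888 (cpp = 4), pitch = 8192: (100, 10) -> 10 * 8192 + 100 * 4 = 82320 */
	return y * pitch + x * cpp;
}

The inverse splits the offset back out as y = off / pitch and x = (off % pitch) / cpp, recovering (100, 10) from 82320.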
*/ -static u32 intel_adjust_tile_offset(int *x, int *y, - unsigned int tile_width, - unsigned int tile_height, - unsigned int tile_size, - unsigned int pitch_tiles, - u32 old_offset, - u32 new_offset) -{ +static u32 _intel_adjust_tile_offset(int *x, int *y, + unsigned int tile_width, + unsigned int tile_height, + unsigned int tile_size, + unsigned int pitch_tiles, + u32 old_offset, + u32 new_offset) +{ + unsigned int pitch_pixels = pitch_tiles * tile_width; unsigned int tiles; WARN_ON(old_offset & (tile_size - 1)); @@ -2320,6 +2329,54 @@ static u32 intel_adjust_tile_offset(int *x, int *y, *y += tiles / pitch_tiles * tile_height; *x += tiles % pitch_tiles * tile_width; + /* minimize x in case it got needlessly big */ + *y += *x / pitch_pixels * tile_height; + *x %= pitch_pixels; + + return new_offset; +} + +/* + * Adjust the tile offset by moving the difference into + * the x/y offsets. + */ +static u32 intel_adjust_tile_offset(int *x, int *y, + const struct intel_plane_state *state, int plane, + u32 old_offset, u32 new_offset) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int rotation = state->base.rotation; + unsigned int pitch = intel_fb_pitch(fb, plane, rotation); + + WARN_ON(new_offset > old_offset); + + if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) { + unsigned int tile_size, tile_width, tile_height; + unsigned int pitch_tiles; + + tile_size = intel_tile_size(dev_priv); + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[plane], cpp); + + if (intel_rotation_90_or_270(rotation)) { + pitch_tiles = pitch / tile_height; + swap(tile_width, tile_height); + } else { + pitch_tiles = pitch / (tile_width * cpp); + } + + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + old_offset, new_offset); + } else { + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + } + return new_offset; } @@ -2330,18 +2387,24 @@ static u32 intel_adjust_tile_offset(int *x, int *y, * In the 90/270 rotated case, x and y are assumed * to be already rotated to match the rotated GTT view, and * pitch is the tile_height aligned framebuffer height. + * + * This function is used when computing the derived information + * under intel_framebuffer, so using any of that information + * here is not allowed. Anything under drm_framebuffer can be + * used. This is why the user has to pass in the pitch since it + * is specified in the rotated orientation. 
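For tiled surfaces the offset arithmetic in _intel_compute_tile_offset() works in whole tiles. A worked example, assuming X-tiling (512-byte tile rows, 8 rows, 4 KiB per tile), cpp = 4 and an 8192-byte pitch, i.e. 16 tiles per row:

/*
 * (x, y) = (200, 20):
 *   tile_rows = 20 / 8              = 2
 *   tiles     = 200 * 4 / 512       = 1
 *   offset    = (2 * 16 + 1) * 4096 = 135168 bytes
 * with the residual x = 200 - 1 * 128 = 72 and y = 20 - 2 * 8 = 4
 * folded back into the coordinates relative to the aligned base.
 */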
*/ -u32 intel_compute_tile_offset(int *x, int *y, - const struct drm_framebuffer *fb, int plane, - unsigned int pitch, - unsigned int rotation) +static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, + int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int pitch, + unsigned int rotation, + u32 alignment) { - const struct drm_i915_private *dev_priv = to_i915(fb->dev); uint64_t fb_modifier = fb->modifier[plane]; unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); - u32 offset, offset_aligned, alignment; + u32 offset, offset_aligned; - alignment = intel_surf_alignment(dev_priv, fb_modifier); if (alignment) alignment--; @@ -2369,9 +2432,9 @@ u32 intel_compute_tile_offset(int *x, int *y, offset = (tile_rows * pitch_tiles + tiles) * tile_size; offset_aligned = offset & ~alignment; - intel_adjust_tile_offset(x, y, tile_width, tile_height, - tile_size, pitch_tiles, - offset, offset_aligned); + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; offset_aligned = offset & ~alignment; @@ -2383,6 +2446,177 @@ u32 intel_compute_tile_offset(int *x, int *y, return offset_aligned; } +u32 intel_compute_tile_offset(int *x, int *y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = state->base.fb; + unsigned int rotation = state->base.rotation; + int pitch = intel_fb_pitch(fb, plane, rotation); + u32 alignment; + + /* AUX_DIST needs only 4K alignment */ + if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1) + alignment = 4096; + else + alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]); + + return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, + rotation, alignment); +} + +/* Convert the fb->offset[] linear offset into x/y offsets */ +static void intel_fb_offset_to_xy(int *x, int *y, + const struct drm_framebuffer *fb, int plane) +{ + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + u32 linear_offset = fb->offsets[plane]; + + *y = linear_offset / pitch; + *x = linear_offset % pitch / cpp; +} + +static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) +{ + switch (fb_modifier) { + case I915_FORMAT_MOD_X_TILED: + return I915_TILING_X; + case I915_FORMAT_MOD_Y_TILED: + return I915_TILING_Y; + default: + return I915_TILING_NONE; + } +} + +static int +intel_fill_fb_info(struct drm_i915_private *dev_priv, + struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_rotation_info *rot_info = &intel_fb->rot_info; + u32 gtt_offset_rotated = 0; + unsigned int max_size = 0; + uint32_t format = fb->pixel_format; + int i, num_planes = drm_format_num_planes(format); + unsigned int tile_size = intel_tile_size(dev_priv); + + for (i = 0; i < num_planes; i++) { + unsigned int width, height; + unsigned int cpp, size; + u32 offset; + int x, y; + + cpp = drm_format_plane_cpp(format, i); + width = drm_format_plane_width(fb->width, format, i); + height = drm_format_plane_height(fb->height, format, i); + + intel_fb_offset_to_xy(&x, &y, fb, i); + + /* + * The fence (if used) is aligned to the start of the object + * so having the framebuffer wrap around across the edge of the + * fenced region doesn't really work. 
We have no API to configure + * the fence start offset within the object (nor could we probably + * on gen2/3). So it's just easier if we just require that the + * fb layout agrees with the fence layout. We already check that the + * fb stride matches the fence stride elsewhere. + */ + if (i915_gem_object_is_tiled(intel_fb->obj) && + (x + width) * cpp > fb->pitches[i]) { + DRM_DEBUG("bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); + return -EINVAL; + } + + /* + * First pixel of the framebuffer from + * the start of the normal gtt mapping. + */ + intel_fb->normal[i].x = x; + intel_fb->normal[i].y = y; + + offset = _intel_compute_tile_offset(dev_priv, &x, &y, + fb, 0, fb->pitches[i], + DRM_ROTATE_0, tile_size); + offset /= tile_size; + + if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) { + unsigned int tile_width, tile_height; + unsigned int pitch_tiles; + struct drm_rect r; + + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[i], cpp); + + rot_info->plane[i].offset = offset; + rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); + rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); + rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); + + intel_fb->rotated[i].pitch = + rot_info->plane[i].height * tile_height; + + /* how many tiles does this plane need */ + size = rot_info->plane[i].stride * rot_info->plane[i].height; + /* + * If the plane isn't horizontally tile aligned, + * we need one more tile. + */ + if (x != 0) + size++; + + /* rotate the x/y offsets to match the GTT view */ + r.x1 = x; + r.y1 = y; + r.x2 = x + width; + r.y2 = y + height; + drm_rect_rotate(&r, + rot_info->plane[i].width * tile_width, + rot_info->plane[i].height * tile_height, + DRM_ROTATE_270); + x = r.x1; + y = r.y1; + + /* rotate the tile dimensions to match the GTT view */ + pitch_tiles = intel_fb->rotated[i].pitch / tile_height; + swap(tile_width, tile_height); + + /* + * We only keep the x/y offsets, so push all of the + * gtt offset into the x/y offsets. + */ + _intel_adjust_tile_offset(&x, &y, tile_size, + tile_width, tile_height, pitch_tiles, + gtt_offset_rotated * tile_size, 0); + + gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; + + /* + * First pixel of the framebuffer from + * the start of the rotated gtt mapping. 
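The rotated-view bookkeeping above is easiest to follow with numbers. A sketch, assuming a 1920x1080 XRGB8888 fb (cpp = 4, pitch = 7680 bytes, offsets[0] = 0) that is Y-tiled (128-byte tile rows, 32 rows, so 32x32 pixels at this cpp):

/*
 * plane[0].stride = DIV_ROUND_UP(7680, 128) = 60 tiles
 * plane[0].width  = DIV_ROUND_UP(1920, 32)  = 60 tiles   (x = 0)
 * plane[0].height = DIV_ROUND_UP(1080, 32)  = 34 tiles
 * rotated pitch   = 34 * 32                 = 1088 pixels
 * tiles needed    = 60 * 34                 = 2040 (~8 MiB at 4 KiB each)
 */

In the 90/270 view the walk is column-major, which is why the rotated pitch is a tile-aligned height rather than a byte width.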
+ */ + intel_fb->rotated[i].x = x; + intel_fb->rotated[i].y = y; + } else { + size = DIV_ROUND_UP((y + height) * fb->pitches[i] + + x * cpp, tile_size); + } + + /* how many tiles in total needed in the bo */ + max_size = max(max_size, offset + size); + } + + if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) { + DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n", + max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size); + return -EINVAL; + } + + return 0; +} + static int i9xx_format_to_fourcc(int format) { switch (format) { @@ -2552,7 +2786,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, continue; obj = intel_fb_obj(fb); - if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { drm_framebuffer_reference(fb); goto valid_fb; } @@ -2604,6 +2838,169 @@ valid_fb: &obj->frontbuffer_bits); } +static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + switch (fb->modifier[plane]) { + case DRM_FORMAT_MOD_NONE: + case I915_FORMAT_MOD_X_TILED: + switch (cpp) { + case 8: + return 4096; + case 4: + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + switch (cpp) { + case 8: + return 2048; + case 4: + return 4096; + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + default: + MISSING_CASE(fb->modifier[plane]); + } + + return 2048; +} + +static int skl_check_main_surface(struct intel_plane_state *plane_state) +{ + const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; + int w = drm_rect_width(&plane_state->base.src) >> 16; + int h = drm_rect_height(&plane_state->base.src) >> 16; + int max_width = skl_max_plane_width(fb, 0, rotation); + int max_height = 4096; + u32 alignment, offset, aux_offset = plane_state->aux.offset; + + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + intel_add_fb_offsets(&x, &y, plane_state, 0); + offset = intel_compute_tile_offset(&x, &y, plane_state, 0); + + alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); + + /* + * AUX surface offset is specified as the distance from the + * main surface offset, and it must be non-negative. Make + * sure that is what we will get. + */ + if (offset > aux_offset) + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, aux_offset & ~(alignment - 1)); + + /* + * When using an X-tiled surface, the plane blows up + * if the x offset + width exceed the stride. 
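Each pass of the while loop that follows steps the hardware base back by one alignment unit and lets intel_adjust_tile_offset() fold the difference into (x, y). A sketch of one step, assuming the 256 KiB alignment gen9 uses for X-tiled scanout and an illustrative pitch that is not an even multiple of it:

/*
 * one backward step, X-tiled, pitch = 10240 bytes (20 tiles of 512B):
 *   tiles moved = 262144 / 4096      = 64
 *   full rows   = 64 / 20            = 3  (y += 3 * 8 = 24 scanlines)
 *   leftover    = 64 % 20            = 4 tiles -> x += 4 * 128 = 512 px
 * x is then reduced modulo the 2560-pixel row (bumping y again), so
 * repeated steps walk x around until x + w fits inside the stride.
 */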
+ * + * TODO: linear and Y-tiled seem fine, Yf untested, + */ + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + + while ((x + w) * cpp > fb->pitches[0]) { + if (offset == 0) { + DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); + return -EINVAL; + } + + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, offset - alignment); + } + } + + plane_state->main.offset = offset; + plane_state->main.x = x; + plane_state->main.y = y; + + return 0; +} + +static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int max_width = skl_max_plane_width(fb, 1, rotation); + int max_height = 4096; + int x = plane_state->base.src.x1 >> 17; + int y = plane_state->base.src.y1 >> 17; + int w = drm_rect_width(&plane_state->base.src) >> 17; + int h = drm_rect_height(&plane_state->base.src) >> 17; + u32 offset; + + intel_add_fb_offsets(&x, &y, plane_state, 1); + offset = intel_compute_tile_offset(&x, &y, plane_state, 1); + + /* FIXME not quite sure how/if these apply to the chroma plane */ + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + plane_state->aux.offset = offset; + plane_state->aux.x = x; + plane_state->aux.y = y; + + return 0; +} + +int skl_check_plane_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int ret; + + /* Rotate src coordinates to match rotated GTT view */ + if (intel_rotation_90_or_270(rotation)) + drm_rect_rotate(&plane_state->base.src, + fb->width, fb->height, DRM_ROTATE_270); + + /* + * Handle the AUX surface first since + * the main surface setup depends on it. 
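The chroma-plane shifts above are doing two things at once: src coordinates are 16.16 fixed point, so ">> 16" yields integer luma pixels, and the extra bit (">> 17") halves them again because NV12 subsamples CbCr by two in both axes:

int luma_x   = plane_state->base.src.x1 >> 16;
int chroma_x = plane_state->base.src.x1 >> 17;	/* == luma_x / 2 */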
+ */ + if (fb->pixel_format == DRM_FORMAT_NV12) { + ret = skl_check_nv12_aux_surface(plane_state); + if (ret) + return ret; + } else { + plane_state->aux.offset = ~0xfff; + plane_state->aux.x = 0; + plane_state->aux.y = 0; + } + + ret = skl_check_main_surface(plane_state); + if (ret) + return ret; + + return 0; +} + static void i9xx_update_primary_plane(struct drm_plane *primary, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -2618,7 +3015,6 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); int x = plane_state->base.src.x1 >> 16; int y = plane_state->base.src.y1 >> 16; @@ -2671,36 +3067,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, BUG(); } - if (INTEL_INFO(dev)->gen >= 4 && i915_gem_object_is_tiled(obj)) + if (INTEL_GEN(dev_priv) >= 4 && + fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; if (IS_G4X(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; + intel_add_fb_offsets(&x, &y, plane_state, 0); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_INFO(dev)->gen >= 4) intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; - } else { - intel_crtc->dspaddr_offset = linear_offset; - } + intel_compute_tile_offset(&x, &y, plane_state, 0); if (rotation == DRM_ROTATE_180) { dspcntr |= DISPPLANE_ROTATE_180; x += (crtc_state->pipe_src_w - 1); y += (crtc_state->pipe_src_h - 1); - - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (INTEL_INFO(dev)->gen < 4) + intel_crtc->dspaddr_offset = linear_offset; + intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2709,11 +3100,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else - I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); + I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset); POSTING_READ(reg); } @@ -2741,13 +3133,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); int x = plane_state->base.src.x1 >> 16; int y = plane_state->base.src.y1 >> 16; @@ -2780,32 +3170,28 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, BUG(); } - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; if (!IS_HASWELL(dev) && 
!IS_BROADWELL(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; + intel_add_fb_offsets(&x, &y, plane_state, 0); + intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; + intel_compute_tile_offset(&x, &y, plane_state, 0); + if (rotation == DRM_ROTATE_180) { dspcntr |= DISPPLANE_ROTATE_180; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += (crtc_state->pipe_src_w - 1); y += (crtc_state->pipe_src_h - 1); - - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; } } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2813,7 +3199,8 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { @@ -2835,32 +3222,21 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } } -u32 intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane) +u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, + unsigned int rotation) { + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; struct i915_vma *vma; - u64 offset; - intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, - intel_plane->base.state->rotation); + intel_fill_fb_ggtt_view(&view, fb, rotation); - vma = i915_gem_obj_to_ggtt_view(obj, &view); + vma = i915_gem_object_to_ggtt(obj, &view); if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", - view.type)) + view.type)) return -1; - offset = vma->node.start; - - if (plane == 1) { - offset += vma->ggtt_view.params.rotated.uv_start_page * - PAGE_SIZE; - } - - WARN_ON(upper_32_bits(offset)); - - return lower_32_bits(offset); + return i915_ggtt_offset(vma); } static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) @@ -2890,6 +3266,28 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc) } } +u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + const struct drm_i915_private *dev_priv = to_i915(fb->dev); + u32 stride = intel_fb_pitch(fb, plane, rotation); + + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. 
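Continuing the Y-tiled 1920x1080 example from above (pitch = 7680 bytes, 32x32-pixel tiles at cpp = 4), skl_plane_stride() would program:

/*
 * linear:   7680 / 64  = 120  (64-byte chunks)
 * Y-tiled:  7680 / 128 = 60   (tiles, unrotated)
 * 90/270:   1088 / 32  = 34   (tiles: rotated pitch / tile_height)
 */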
+ */ + if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp); + } else { + stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0], + fb->pixel_format); + } + + return stride; +} + u32 skl_plane_ctl_format(uint32_t pixel_format) { switch (pixel_format) { @@ -2979,16 +3377,14 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_crtc->pipe; - u32 plane_ctl, stride_div, stride; - u32 tile_height, plane_offset, plane_size; + u32 plane_ctl; unsigned int rotation = plane_state->base.rotation; - int x_offset, y_offset; - u32 surf_addr; + u32 stride = skl_plane_stride(fb, 0, rotation); + u32 surf_addr = plane_state->main.offset; int scaler_id = plane_state->scaler_id; - int src_x = plane_state->base.src.x1 >> 16; - int src_y = plane_state->base.src.y1 >> 16; + int src_x = plane_state->main.x; + int src_y = plane_state->main.y; int src_w = drm_rect_width(&plane_state->base.src) >> 16; int src_h = drm_rect_height(&plane_state->base.src) >> 16; int dst_x = plane_state->base.dst.x1; @@ -3005,36 +3401,19 @@ static void skylake_update_primary_plane(struct drm_plane *plane, plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); - - WARN_ON(drm_rect_width(&plane_state->base.src) == 0); - - if (intel_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); - stride = DIV_ROUND_UP(fb->height, tile_height); - x_offset = stride * tile_height - src_y - src_h; - y_offset = src_x; - plane_size = (src_w - 1) << 16 | (src_h - 1); - } else { - stride = fb->pitches[0] / stride_div; - x_offset = src_x; - y_offset = src_y; - plane_size = (src_h - 1) << 16 | (src_w - 1); - } - plane_offset = y_offset << 16 | x_offset; + /* Sizes are 0 based */ + src_w--; + src_h--; + dst_w--; + dst_h--; - intel_crtc->adjusted_x = x_offset; - intel_crtc->adjusted_y = y_offset; + intel_crtc->adjusted_x = src_x; + intel_crtc->adjusted_y = src_y; I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); - I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); + I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); I915_WRITE(PLANE_STRIDE(pipe, 0), stride); + I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w); if (scaler_id >= 0) { uint32_t ps_ctrl = 0; @@ -3051,7 +3430,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane, I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); } - I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); + I915_WRITE(PLANE_SURF(pipe, 0), + intel_fb_gtt_offset(fb, rotation) + surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -3093,40 +3473,113 @@ static void intel_update_primary_planes(struct drm_device *dev) for_each_crtc(dev, crtc) { struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state; - - drm_modeset_lock_crtc(crtc, &plane->base); - plane_state = to_intel_plane_state(plane->base.state); + struct intel_plane_state *plane_state 
= + to_intel_plane_state(plane->base.state); if (plane_state->base.visible) plane->update_plane(&plane->base, to_intel_crtc_state(crtc->state), plane_state); + } +} + +static int +__intel_display_resume(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i, ret; + + intel_modeset_setup_hw_state(dev); + i915_redisable_vga(dev); + + if (!state) + return 0; - drm_modeset_unlock_crtc(crtc); + for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * Force recalculation even if we restore + * current state. With fast modeset this may not result + * in a modeset when the state is compatible. + */ + crtc_state->mode_changed = true; } + + /* ignore any reset values/BIOS leftovers in the WM registers */ + to_intel_atomic_state(state)->skip_intermediate_wm = true; + + ret = drm_atomic_commit(state); + + WARN_ON(ret == -EDEADLK); + return ret; +} + +static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) +{ + return intel_has_gpu_reset(dev_priv) && + INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); } void intel_prepare_reset(struct drm_i915_private *dev_priv) { - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state; + int ret; - /* reset doesn't touch the display */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + /* + * Need mode_config.mutex so that we don't + * trample ongoing ->detect() and whatnot. + */ + mutex_lock(&dev->mode_config.mutex); + drm_modeset_acquire_init(ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(dev, ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(ctx); + } + + /* reset doesn't touch the display, but flips might get nuked anyway, */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) return; - drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(&dev_priv->drm); + state = drm_atomic_helper_duplicate_state(dev, ctx); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + state = NULL; + DRM_ERROR("Duplicating state failed with %i\n", ret); + goto err; + } + + ret = drm_atomic_helper_disable_all(dev, ctx); + if (ret) { + DRM_ERROR("Suspending crtc's failed with %i\n", ret); + goto err; + } + + dev_priv->modeset_restore_state = state; + state->acquire_ctx = ctx; + return; + +err: + drm_atomic_state_free(state); } void intel_finish_reset(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state = dev_priv->modeset_restore_state; + int ret; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3134,44 +3587,51 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) */ intel_complete_page_flips(dev_priv); - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; + dev_priv->modeset_restore_state = NULL; /* reset doesn't touch the display */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { + if (!gpu_reset_clobbers_display(dev_priv)) { + if (!state) { + /* + * Flips in the rings have been nuked by the reset, + * so update the base address of all primary + * planes to the the last fb to make sure we're + * showing the correct fb after a reset. 
+ * + * FIXME: Atomic will make this obsolete since we won't schedule + * CS-based flips (which might get lost in gpu resets) any more. + */ + intel_update_primary_planes(dev); + } else { + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); + } + } else { /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. + * The display has been reset as well, + * so need a full re-initialization. */ - intel_update_primary_planes(&dev_priv->drm); - return; - } + intel_runtime_pm_disable_interrupts(dev_priv); + intel_runtime_pm_enable_interrupts(dev_priv); - /* - * The display has been reset as well, - * so need a full re-initialization. - */ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_modeset_init_hw(dev); - intel_modeset_init_hw(&dev_priv->drm); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - intel_display_resume(&dev_priv->drm); + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); - intel_hpd_init(dev_priv); + intel_hpd_init(dev_priv); + } - drm_modeset_unlock_all(&dev_priv->drm); + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + mutex_unlock(&dev->mode_config.mutex); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -9411,7 +9871,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); - I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); + I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n"); I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); if (IS_HASWELL(dev)) @@ -11198,7 +11658,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | - i915_gem_object_get_tiling(obj)); + intel_fb_modifier_to_tiling(fb->modifier[0])); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. 
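Taken together, intel_prepare_reset() and intel_finish_reset() above implement "suspend the display across reset" with stock atomic helpers; stripped of error handling, the core is:

state = drm_atomic_helper_duplicate_state(dev, ctx);	/* snapshot */
drm_atomic_helper_disable_all(dev, ctx);		/* blank everything */
/* ... GPU reset happens ... */
ret = __intel_display_resume(dev, state);		/* replay the snapshot */

The fast path (reset does not clobber the display and no state was saved) instead just re-points PLANE_SURF at the current fb via intel_update_primary_planes().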
@@ -11230,7 +11690,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj)); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier[0])); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, @@ -11325,7 +11786,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); intel_ring_emit_reg(ring, DERRMR); - intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256); + intel_ring_emit(ring, + i915_ggtt_offset(req->engine->scratch) + 256); if (IS_GEN8(dev)) { intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); @@ -11333,7 +11795,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, } intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); - intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj)); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier[0])); intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); intel_ring_emit(ring, (MI_NOOP)); @@ -11382,7 +11845,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb = intel_crtc->base.primary->fb; const enum pipe pipe = intel_crtc->pipe; - u32 ctl, stride, tile_height; + u32 ctl, stride = skl_plane_stride(fb, 0, rotation); ctl = I915_READ(PLANE_CTL(pipe, 0)); ctl &= ~PLANE_CTL_TILED_MASK; @@ -11403,20 +11866,6 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. - */ - if (intel_rotation_90_or_270(rotation)) { - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); - stride = DIV_ROUND_UP(fb->height, tile_height); - } else { - stride = fb->pitches[0] / - intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - } - - /* * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on * PLANE_SURF updates, the update is then guaranteed to be atomic. 
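Because the double-buffered plane registers latch on the PLANE_SURF write, write ordering is load-bearing here: stage PLANE_CTL and PLANE_STRIDE first, commit with PLANE_SURF last. The tail of skl_do_mmio_flip(), just past this hunk, is roughly:

I915_WRITE(PLANE_CTL(pipe, 0), ctl);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->flip_work->gtt_offset);
POSTING_READ(PLANE_SURF(pipe, 0));	/* flush past the commit point */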
*/ @@ -11432,15 +11881,13 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_framebuffer *intel_fb = - to_intel_framebuffer(intel_crtc->base.primary->fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_framebuffer *fb = intel_crtc->base.primary->fb; i915_reg_t reg = DSPCNTR(intel_crtc->plane); u32 dspcntr; dspcntr = I915_READ(reg); - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -11577,6 +12024,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request; + struct i915_vma *vma; int ret; /* @@ -11668,8 +12116,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { engine = &dev_priv->engine[BCS]; - if (i915_gem_object_get_tiling(obj) != - i915_gem_object_get_tiling(intel_fb_obj(work->old_fb))) + if (fb->modifier[0] != old_fb->modifier[0]) /* vlv: DISPLAY_FLIP fails to change tiling */ engine = NULL; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { @@ -11685,12 +12132,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mmio_flip = use_mmio_flip(engine, obj); - ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); - if (ret) + vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto cleanup_pending; + } - work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), - obj, 0); + work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); work->gtt_offset += intel_crtc->dspaddr_offset; work->rotation = crtc->primary->state->rotation; @@ -14035,7 +14483,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation); + struct i915_vma *vma; + + vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); + if (IS_ERR(vma)) + ret = PTR_ERR(vma); } if (ret == 0) { @@ -14110,12 +14562,14 @@ intel_check_primary_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_crtc *crtc = state->base.crtc; int min_scale = DRM_PLANE_HELPER_NO_SCALING; int max_scale = DRM_PLANE_HELPER_NO_SCALING; bool can_position = false; + int ret; - if (INTEL_INFO(plane->dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { /* use scaler when colorkey is not required */ if (state->ckey.flags == I915_SET_COLORKEY_NONE) { min_scale = 1; @@ -14124,10 +14578,23 @@ intel_check_primary_plane(struct drm_plane *plane, can_position = true; } - return drm_plane_helper_check_state(&state->base, - &state->clip, - min_scale, max_scale, - can_position, true); + ret = drm_plane_helper_check_state(&state->base, + &state->clip, + min_scale, max_scale, + can_position, true); + if (ret) + return ret; + + if (!state->base.fb) + return 0; + + if (INTEL_GEN(dev_priv) >= 9) { + ret = skl_check_plane_surface(state); + if (ret) + return ret; + } + + return 0; } static void intel_begin_crtc_commit(struct drm_crtc *crtc, @@ -14386,7 +14853,7 @@ intel_update_cursor_plane(struct drm_plane *plane, if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) - addr = i915_gem_obj_ggtt_offset(obj); + addr = i915_gem_object_ggtt_offset(obj, NULL); else 
addr = obj->phys_handle->busaddr; @@ -14639,12 +15106,50 @@ static bool intel_crt_present(struct drm_device *dev) return true; } +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) +{ + int pps_num; + int pps_idx; + + if (HAS_DDI(dev_priv)) + return; + /* + * This w/a is needed at least on CPT/PPT, but to be sure apply it + * everywhere where registers can be write protected. + */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_num = 2; + else + pps_num = 1; + + for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { + u32 val = I915_READ(PP_CONTROL(pps_idx)); + + val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; + I915_WRITE(PP_CONTROL(pps_idx), val); + } +} + +static void intel_pps_init(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv)) + dev_priv->pps_mmio_base = PCH_PPS_BASE; + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->pps_mmio_base = VLV_PPS_BASE; + else + dev_priv->pps_mmio_base = PPS_BASE; + + intel_pps_unlock_regs_wa(dev_priv); +} + static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; + intel_pps_init(dev_priv); + /* * intel_edp_init_connector() depends on this completing first, to * prevent the registration of both eDP and LVDS and the incorrect @@ -14912,7 +15417,7 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(dev); - unsigned int aligned_height; + unsigned int tiling = i915_gem_object_get_tiling(obj); int ret; u32 pitch_limit, stride_alignment; char *format_name; @@ -14920,17 +15425,19 @@ static int intel_framebuffer_init(struct drm_device *dev, WARN_ON(!mutex_is_locked(&dev->struct_mutex)); if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { - /* Enforce that fb modifier and tiling mode match, but only for - * X-tiled. This is needed for FBC. */ - if (!!(i915_gem_object_get_tiling(obj) == I915_TILING_X) != - !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { + /* + * If there's a fence, enforce that + * the fb modifier and tiling mode match. + */ + if (tiling != I915_TILING_NONE && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); return -EINVAL; } } else { - if (i915_gem_object_get_tiling(obj) == I915_TILING_X) + if (tiling == I915_TILING_X) { mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; - else if (i915_gem_object_get_tiling(obj) == I915_TILING_Y) { + } else if (tiling == I915_TILING_Y) { DRM_DEBUG("No Y tiling for legacy addfb\n"); return -EINVAL; } @@ -14954,6 +15461,16 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } + /* + * gen2/3 display engine uses the fence if present, + * so the tiling mode must match the fb modifier exactly. + */ + if (INTEL_INFO(dev_priv)->gen < 4 && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { + DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n"); + return -EINVAL; + } + stride_alignment = intel_fb_stride_alignment(dev_priv, mode_cmd->modifier[0], mode_cmd->pixel_format); @@ -14973,7 +15490,11 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && + /* + * If there's a fence, enforce that + * the fb pitch and fence stride match.
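 *
 * Worked example (editor's note, illustrative): an X-tiled object
 * whose fence was set up for an 8192 byte stride detiles CPU writes
 * through the GTT using that stride, so a framebuffer claiming a
 * pitches[0] of, say, 4096 over the same pages would scan out
 * scrambled data; the check below therefore rejects any fenced
 * object whose fb pitch differs from i915_gem_object_get_stride(obj).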
+ */ + if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) { DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", mode_cmd->pitches[0], @@ -15045,17 +15566,12 @@ static int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; - aligned_height = intel_fb_align_height(dev, mode_cmd->height, - mode_cmd->pixel_format, - mode_cmd->modifier[0]); - /* FIXME drm helper for size checks (especially planar formats)? */ - if (obj->base.size < aligned_height * mode_cmd->pitches[0]) - return -EINVAL; - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; - intel_fill_fb_info(dev_priv, &intel_fb->base); + ret = intel_fill_fb_info(dev_priv, &intel_fb->base); + if (ret) + return ret; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -15768,6 +16284,13 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder) return false; } +static bool has_pch_trancoder(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder) +{ + return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || + (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); +} + static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -15846,7 +16369,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * worst a fifo underrun happens which also sets this to false. */ crtc->cpu_fifo_underrun_disabled = true; + /* + * We track the PCH transcoder underrun reporting state + * within the crtc. With crtc for pipe A housing the underrun + * reporting state for PCH transcoder A, crtc for pipe B housing + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, + * and marking underrun reporting as disabled for the non-existing + * PCH transcoders B and C would prevent enabling the south + * error interrupt (see cpt_can_enable_serr_int()). + */ + if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe)) + crtc->pch_fifo_underrun_disabled = true; - crtc->pch_fifo_underrun_disabled = true; } } @@ -16160,9 +16693,10 @@ void intel_display_resume(struct drm_device *dev) struct drm_atomic_state *state = dev_priv->modeset_restore_state; struct drm_modeset_acquire_ctx ctx; int ret; - bool setup = false; dev_priv->modeset_restore_state = NULL; + if (state) + state->acquire_ctx = &ctx; /* * This is a kludge because with real atomic modeset mode_config.mutex @@ -16173,43 +16707,17 @@ mutex_lock(&dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, 0); -retry: - ret = drm_modeset_lock_all_ctx(dev, &ctx); - - if (ret == 0 && !setup) { - setup = true; - - intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); - } - - if (ret == 0 && state) { - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - state->acquire_ctx = &ctx; - - /* ignore any reset values/BIOS leftovers in the WM registers */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { - /* - * Force recalculation even if we restore - * current state. With fast modeset this may not result - * in a modeset when the state is compatible.
- */ - crtc_state->mode_changed = true; - } - - ret = drm_atomic_commit(state); - } + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret != -EDEADLK) + break; - if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); - goto retry; } + if (!ret) + ret = __intel_display_resume(dev, state); + drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); @@ -16225,7 +16733,6 @@ void intel_modeset_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; - int ret; intel_init_gt_powersave(dev_priv); @@ -16239,15 +16746,17 @@ void intel_modeset_gem_init(struct drm_device *dev) * for this. */ for_each_crtc(dev, c) { + struct i915_vma *vma; + obj = intel_fb_obj(c->primary->fb); if (obj == NULL) continue; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(c->primary->fb, + vma = intel_pin_and_fence_fb_obj(c->primary->fb, c->primary->state->rotation); mutex_unlock(&dev->struct_mutex); - if (ret) { + if (IS_ERR(vma)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8fe2afa..364db90 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -256,6 +256,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct intel_dp *intel_dp); +static void +intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); static void pps_lock(struct intel_dp *intel_dp) { @@ -463,13 +465,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON; + return I915_READ(PP_STATUS(pipe)) & PP_ON; } static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD; + return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD; } static bool vlv_pipe_any(struct drm_i915_private *dev_priv, @@ -486,7 +488,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, enum pipe pipe; for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { - u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & + u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) & PANEL_PORT_SELECT_MASK; if (port_sel != PANEL_PORT_SELECT_VLV(port)) @@ -583,30 +585,21 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, struct intel_dp *intel_dp, struct pps_registers *regs) { + int pps_idx = 0; + memset(regs, 0, sizeof(*regs)); - if (IS_BROXTON(dev_priv)) { - int idx = bxt_power_sequencer_idx(intel_dp); - - regs->pp_ctrl = BXT_PP_CONTROL(idx); - regs->pp_stat = BXT_PP_STATUS(idx); - regs->pp_on = BXT_PP_ON_DELAYS(idx); - regs->pp_off = BXT_PP_OFF_DELAYS(idx); - } else if (HAS_PCH_SPLIT(dev_priv)) { - regs->pp_ctrl = PCH_PP_CONTROL; - regs->pp_stat = PCH_PP_STATUS; - regs->pp_on = PCH_PP_ON_DELAYS; - regs->pp_off = PCH_PP_OFF_DELAYS; - regs->pp_div = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + if (IS_BROXTON(dev_priv)) + pps_idx = bxt_power_sequencer_idx(intel_dp); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_idx = vlv_power_sequencer_pipe(intel_dp); - regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe); - regs->pp_stat = VLV_PIPE_PP_STATUS(pipe); - regs->pp_on = 
VLV_PIPE_PP_ON_DELAYS(pipe); - regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe); - regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe); - } + regs->pp_ctrl = PP_CONTROL(pps_idx); + regs->pp_stat = PP_STATUS(pps_idx); + regs->pp_on = PP_ON_DELAYS(pps_idx); + regs->pp_off = PP_OFF_DELAYS(pps_idx); + if (!IS_BROXTON(dev_priv)) + regs->pp_div = PP_DIVISOR(pps_idx); } static i915_reg_t @@ -651,8 +644,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, i915_reg_t pp_ctrl_reg, pp_div_reg; u32 pp_div; - pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); + pp_ctrl_reg = PP_CONTROL(pipe); + pp_div_reg = PP_DIVISOR(pipe); pp_div = I915_READ(pp_div_reg); pp_div &= PP_REFERENCE_DIVIDER_MASK; @@ -1836,7 +1829,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); control = I915_READ(_pp_ctrl_reg(intel_dp)); - if (!IS_BROXTON(dev)) { + if (WARN_ON(!HAS_DDI(dev_priv) && + (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; } @@ -1957,7 +1951,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if ((pp & POWER_TARGET_ON) == 0) + if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); power_domain = intel_display_port_aux_power_domain(intel_encoder); @@ -2044,7 +2038,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) POSTING_READ(pp_ctrl_reg); } - pp |= POWER_TARGET_ON; + pp |= PANEL_POWER_ON; if (!IS_GEN5(dev)) pp |= PANEL_POWER_RESET; @@ -2096,7 +2090,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. */ - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | + pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | EDP_BLC_ENABLE); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -2729,7 +2723,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum pipe pipe = intel_dp->pps_pipe; - i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); + i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); edp_panel_vdd_off_sync(intel_dp); @@ -4666,13 +4660,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) pps_lock(intel_dp); - /* - * Read out the current power sequencer assignment, - * in case the BIOS did something with it. - */ - if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev)) - vlv_initial_power_sequencer_setup(intel_dp); - + /* Reinit the power sequencer, in case BIOS did something with it. 
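 *
 * Editor's sketch of the helper this now calls (intel_dp_pps_init,
 * defined later in this patch): VLV/CHV re-read which pipe's power
 * sequencer the BIOS assigned, everyone else reprograms the PPS
 * registers from scratch:
 *
 *	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 *		vlv_initial_power_sequencer_setup(intel_dp);
 *	} else {
 *		intel_dp_init_panel_power_sequencer(dev, intel_dp);
 *		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 *	}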
*/ + intel_dp_pps_init(encoder->dev, intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); @@ -5020,6 +5009,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, I915_READ(regs.pp_div)); } +static void intel_dp_pps_init(struct drm_device *dev, + struct intel_dp *intel_dp) +{ + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + vlv_initial_power_sequencer_setup(intel_dp); + } else { + intel_dp_init_panel_power_sequencer(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + } +} + /** * intel_dp_set_drrs_state - program registers for RR switch to take effect * @dev: DRM device @@ -5434,14 +5434,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, pps_lock(intel_dp); intel_dp_init_panel_power_timestamps(intel_dp); - - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - vlv_initial_power_sequencer_setup(intel_dp); - } else { - intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); - } - + intel_dp_pps_init(dev, intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b37b8ef..774aab3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,11 +178,22 @@ struct intel_framebuffer { struct drm_framebuffer base; struct drm_i915_gem_object *obj; struct intel_rotation_info rot_info; + + /* for each plane in the normal GTT view */ + struct { + unsigned int x, y; + } normal[2]; + /* for each plane in the rotated GTT view */ + struct { + unsigned int x, y; + unsigned int pitch; /* pixels */ + } rotated[2]; }; struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; + struct i915_vma *vma; async_cookie_t cookie; int preferred_bpp; }; @@ -340,6 +351,15 @@ struct intel_plane_state { struct drm_plane_state base; struct drm_rect clip; + struct { + u32 offset; + int x, y; + } main; + struct { + u32 offset; + int x, y; + } aux; + /* * scaler_id * = -1 : not using a scaler @@ -1153,12 +1173,18 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; void intel_init_display_hooks(struct drm_i915_private *dev_priv); +unsigned int intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane); +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, int plane); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); void intel_mark_busy(struct drm_i915_private *dev_priv); void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); int intel_display_suspend(struct drm_device *dev); +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); int intel_connector_init(struct intel_connector *); struct intel_connector *intel_connector_alloc(void); @@ -1214,8 +1240,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); -int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - unsigned int rotation); +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); void 
intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, @@ -1277,9 +1303,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) u32 intel_compute_tile_offset(int *x, int *y, - const struct drm_framebuffer *fb, int plane, - unsigned int pitch, - unsigned int rotation); + const struct intel_plane_state *state, int plane); void intel_prepare_reset(struct drm_i915_private *dev_priv); void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); @@ -1322,13 +1346,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -u32 intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane); +u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); u32 skl_plane_ctl_rotation(unsigned int rotation); +u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, + unsigned int rotation); +int skl_check_plane_surface(struct intel_plane_state *plane_state); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index e9b301a..2e96a86 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -29,7 +29,7 @@ static const struct engine_info { const char *name; unsigned exec_id; - unsigned guc_id; + enum intel_engine_hw_id hw_id; u32 mmio_base; unsigned irq_shift; int (*init_legacy)(struct intel_engine_cs *engine); @@ -38,7 +38,7 @@ static const struct engine_info { [RCS] = { .name = "render ring", .exec_id = I915_EXEC_RENDER, - .guc_id = GUC_RENDER_ENGINE, + .hw_id = RCS_HW, .mmio_base = RENDER_RING_BASE, .irq_shift = GEN8_RCS_IRQ_SHIFT, .init_execlists = logical_render_ring_init, @@ -47,7 +47,7 @@ static const struct engine_info { [BCS] = { .name = "blitter ring", .exec_id = I915_EXEC_BLT, - .guc_id = GUC_BLITTER_ENGINE, + .hw_id = BCS_HW, .mmio_base = BLT_RING_BASE, .irq_shift = GEN8_BCS_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, @@ -56,7 +56,7 @@ static const struct engine_info { [VCS] = { .name = "bsd ring", .exec_id = I915_EXEC_BSD, - .guc_id = GUC_VIDEO_ENGINE, + .hw_id = VCS_HW, .mmio_base = GEN6_BSD_RING_BASE, .irq_shift = GEN8_VCS1_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, @@ -65,7 +65,7 @@ static const struct engine_info { [VCS2] = { .name = "bsd2 ring", .exec_id = I915_EXEC_BSD, - .guc_id = GUC_VIDEO_ENGINE2, + .hw_id = VCS2_HW, .mmio_base = GEN8_BSD2_RING_BASE, .irq_shift = GEN8_VCS2_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, @@ -74,7 +74,7 @@ static const struct engine_info { [VECS] = { .name = "video enhancement ring", .exec_id = I915_EXEC_VEBOX, - .guc_id = GUC_VIDEOENHANCE_ENGINE, + .hw_id = VECS_HW, .mmio_base = VEBOX_RING_BASE, .irq_shift = GEN8_VECS_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, @@ -93,7 +93,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->i915 = dev_priv; engine->name = info->name; engine->exec_id = info->exec_id; - engine->hw_id = engine->guc_id = 
info->guc_id; + engine->hw_id = engine->guc_id = info->hw_id; engine->mmio_base = info->mmio_base; engine->irq_shift = info->irq_shift; @@ -109,6 +109,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv, int intel_engines_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_device_info *device_info = mkwrite_device_info(dev_priv); unsigned int mask = 0; int (*init)(struct intel_engine_cs *engine); unsigned int i; @@ -142,11 +143,10 @@ int intel_engines_init(struct drm_device *dev) * are added to the driver by a warning and disabling the forgotten * engines. */ - if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) { - struct intel_device_info *info = - (struct intel_device_info *)&dev_priv->info; - info->ring_mask = mask; - } + if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) + device_info->ring_mask = mask; + + device_info->num_rings = hweight32(mask); return 0; @@ -161,9 +161,56 @@ cleanup: return ret; } +void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno) +{ + struct drm_i915_private *dev_priv = engine->i915; + + /* Our semaphore implementation is strictly monotonic (i.e. we proceed + * so long as the semaphore value in the register/page is greater + * than the sync value), so whenever we reset the seqno, + * so long as we reset the tracking semaphore value to 0, it will + * always be before the next request's seqno. If we don't reset + * the semaphore value, then when the seqno moves backwards all + * future waits will complete instantly (causing rendering corruption). + */ + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { + I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); + I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); + if (HAS_VEBOX(dev_priv)) + I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); + } + if (dev_priv->semaphore) { + struct page *page = i915_vma_first_page(dev_priv->semaphore); + void *semaphores; + + /* Semaphores are in noncoherent memory, flush to be safe */ + semaphores = kmap(page); + memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), + 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); + drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), + I915_NUM_ENGINES * gen8_semaphore_seqno_size); + kunmap(page); + } + memset(engine->semaphore.sync_seqno, 0, + sizeof(engine->semaphore.sync_seqno)); + + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + engine->last_submitted_seqno = seqno; + + engine->hangcheck.seqno = seqno; + + /* After manually advancing the seqno, fake the interrupt in case + * there are any waiters for that seqno. 
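 *
 * Editor's sketch of why the wakeup matters: a waiter sleeps until
 * the breadcrumb in the status page reaches its seqno, roughly
 *
 *	while (intel_engine_get_seqno(engine) < rq_seqno)
 *		sleep_until_breadcrumb_interrupt();
 *
 * (pseudo-code; sleep_until_breadcrumb_interrupt() is a hypothetical
 * stand-in for the breadcrumbs machinery). Advancing the seqno by
 * MMIO raises no interrupt, so without the explicit wakeup below any
 * existing waiters could sleep forever.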
+ */ + intel_engine_wakeup(engine); +} + void intel_engine_init_hangcheck(struct intel_engine_cs *engine) { memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); + clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); } static void intel_engine_init_requests(struct intel_engine_cs *engine) @@ -192,6 +239,49 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_requests(engine); intel_engine_init_hangcheck(engine); i915_gem_batch_pool_init(engine, &engine->batch_pool); + + intel_engine_init_cmd_parser(engine); +} + +int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + WARN_ON(engine->scratch); + + obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + if (!obj) + obj = i915_gem_object_create(&engine->i915->drm, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + engine->scratch = vma; + DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) +{ + i915_vma_unpin_and_release(&engine->scratch); } /** @@ -213,7 +303,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) return ret; - return intel_engine_init_cmd_parser(engine); + return 0; } /** @@ -225,7 +315,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine) */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { - intel_engine_cleanup_cmd_parser(engine); + intel_engine_cleanup_scratch(engine); + intel_engine_fini_breadcrumbs(engine); + intel_engine_cleanup_cmd_parser(engine); i915_gem_batch_pool_fini(&engine->batch_pool); } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index e67b09a..bf8b22a 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -190,9 +190,13 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) dpfc_ctl |= DPFC_CTL_LIMIT_2X; else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; - I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; + I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + } else { + I915_WRITE(DPFC_FENCE_YOFF, 0); + } /* enable it... 
*/ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -244,21 +248,29 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) dpfc_ctl |= DPFC_CTL_LIMIT_1X; break; } - dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_GEN5(dev_priv)) - dpfc_ctl |= params->fb.fence_reg; + + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN5(dev_priv)) + dpfc_ctl |= params->fb.fence_reg; + if (IS_GEN6(dev_priv)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, + params->crtc.fence_y_offset); + } + } else { + if (IS_GEN6(dev_priv)) { + I915_WRITE(SNB_DPFC_CTL_SA, 0); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + } + } I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - if (IS_GEN6(dev_priv)) { - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); - } - intel_fbc_recompress(dev_priv); } @@ -305,7 +317,15 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) break; } - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); + } else { + I915_WRITE(SNB_DPFC_CTL_SA, 0); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + } if (dev_priv->fbc.false_color) dpfc_ctl |= FBC_CTL_FALSE_COLOR; @@ -324,10 +344,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); - intel_fbc_recompress(dev_priv); } @@ -709,6 +725,14 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } +/* XXX replace me when we have VMA tracking for intel_plane_state */ +static int get_fence_id(struct drm_framebuffer *fb) +{ + struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL); + + return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE; +} + static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -737,10 +761,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, /* FIXME: We lack the proper locking here, so only run this on the * platforms that need. */ if (IS_GEN(dev_priv, 5, 6)) - cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); + cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; - cache->fb.fence_reg = obj->fence_reg; + cache->fb.fence_reg = get_fence_id(fb); cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); } @@ -768,11 +792,17 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) /* The use of a CPU fence is mandatory in order to detect writes * by the CPU to the scanout and trigger updates to the FBC. + * + * Note that it is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning.
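 *
 * Editor's summary of the resulting policy (sketch):
 *
 *	X-tiled + fenced:	FBC possible on all platforms
 *	unfenced or untiled:	gen2-4: FBC off (no other way to
 *				catch CPU writes); gen5+: may stay
 *				on, relying on frontbuffer tracking
 *
 * matching the gen check below.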
*/ if (cache->fb.tiling_mode != I915_TILING_X || cache->fb.fence_reg == I915_FENCE_REG_NONE) { - fbc->no_fbc_reason = "framebuffer not tiled or fenced"; - return false; + if (INTEL_GEN(dev_priv) < 5) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; + } } if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && cache->plane.rotation != DRM_ROTATE_0) { diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 2c14dfc..4003e49 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -187,7 +187,6 @@ static int intelfb_create(struct drm_fb_helper *helper, struct fb_info *info; struct drm_framebuffer *fb; struct i915_vma *vma; - struct drm_i915_gem_object *obj; bool prealloc = false; void __iomem *vaddr; int ret; @@ -215,17 +214,17 @@ static int intelfb_create(struct drm_fb_helper *helper, sizes->fb_height = intel_fb->base.height; } - obj = intel_fb->obj; - mutex_lock(&dev->struct_mutex); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the * BIOS is suitable for own access. */ - ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); - if (ret) + vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto out_unlock; + } info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { @@ -245,13 +244,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; - vma = i915_gem_obj_to_ggtt(obj); - /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = ggtt->mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + vma->node.start; + info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma); info->fix.smem_len = vma->node.size; vaddr = i915_vma_pin_iomap(vma); @@ -273,14 +270,14 @@ static int intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. 
*/ - if (ifbdev->fb->obj->stolen && !prealloc) + if (intel_fb->obj->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n", - fb->width, fb->height, - i915_gem_obj_ggtt_offset(obj), obj); + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n", + fb->width, fb->height, i915_ggtt_offset(vma)); + ifbdev->vma = vma; mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 623cf26..c973262 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -63,26 +63,25 @@ struct drm_i915_gem_request; * retcode: errno from last guc_submit() */ struct i915_guc_client { - struct drm_i915_gem_object *client_obj; + struct i915_vma *vma; void *client_base; /* first page (only) of above */ struct i915_gem_context *owner; struct intel_guc *guc; + + uint32_t engines; /* bitmap of (host) engine ids */ uint32_t priority; uint32_t ctx_index; - uint32_t proc_desc_offset; + uint32_t doorbell_offset; uint32_t cookie; uint16_t doorbell_id; - uint16_t padding; /* Maintain alignment */ + uint16_t padding[3]; /* Maintain alignment */ uint32_t wq_offset; uint32_t wq_size; uint32_t wq_tail; - uint32_t unused; /* Was 'wq_head' */ - uint32_t no_wq_space; - uint32_t q_fail; /* No longer used */ uint32_t b_fail; int retcode; @@ -125,11 +124,10 @@ struct intel_guc_fw { struct intel_guc { struct intel_guc_fw guc_fw; uint32_t log_flags; - struct drm_i915_gem_object *log_obj; - - struct drm_i915_gem_object *ads_obj; + struct i915_vma *log_vma; - struct drm_i915_gem_object *ctx_pool_obj; + struct i915_vma *ads_vma; + struct i915_vma *ctx_pool_vma; struct ida ctx_ids; struct i915_guc_client *execbuf_client; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 3763e30..324812d 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,13 +59,25 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin" +#define SKL_FW_MAJOR 6 +#define SKL_FW_MINOR 1 + +#define BXT_FW_MAJOR 8 +#define BXT_FW_MINOR 7 + +#define KBL_FW_MAJOR 9 +#define KBL_FW_MINOR 14 + +#define GUC_FW_PATH(platform, major, minor) \ + "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" + +#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) MODULE_FIRMWARE(I915_SKL_GUC_UCODE); -#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" +#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR) MODULE_FIRMWARE(I915_BXT_GUC_UCODE); -#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin" +#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); /* User-friendly representation of an enum */ @@ -181,16 +193,15 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv) i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } - if (guc->ads_obj) { - u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj) - >> PAGE_SHIFT; + if (guc->ads_vma) { + u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; } /* If GuC submission is enabled, set up additional parameters here */ if (i915.enable_guc_submission) { - u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); + u32 pgs = 
i915_ggtt_offset(dev_priv->guc.ctx_pool_vma); u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; pgs >>= PAGE_SHIFT; @@ -238,12 +249,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, * Note that GuC needs the CSS header plus uKernel code to be copied by the * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. */ -static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) +static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv, + struct i915_vma *vma) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; unsigned long offset; - struct sg_table *sg = fw_obj->pages; + struct sg_table *sg = vma->pages; u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT]; int i, ret = 0; @@ -260,7 +271,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); /* Set the source address for the new blob */ - offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset; + offset = i915_ggtt_offset(vma) + guc_fw->header_offset; I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -315,6 +326,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct drm_device *dev = &dev_priv->drm; + struct i915_vma *vma; int ret; ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); @@ -323,10 +335,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) return ret; } - ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0); - if (ret) { - DRM_DEBUG_DRIVER("pin failed %d\n", ret); - return ret; + vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma)); + return PTR_ERR(vma); } /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ @@ -369,7 +381,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) set_guc_init_params(dev_priv); - ret = guc_ucode_xfer_dma(dev_priv); + ret = guc_ucode_xfer_dma(dev_priv, vma); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -377,7 +389,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) * We keep the object pages for reuse during resume. But we can unpin it * now that DMA has completed, so it doesn't continue to take up space. 
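 *
 * Editor's sketch of the firmware blob's lifecycle around the DMA
 * (all calls as used in this patch):
 *
 *	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
 *	ret = guc_ucode_xfer_dma(dev_priv, vma);
 *	i915_vma_unpin(vma);
 *
 * Only the GGTT binding is released; guc_fw_obj keeps its backing
 * pages so a later resume can re-pin and replay the transfer without
 * re-fetching the firmware from disk.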
*/ - i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj); + i915_vma_unpin(vma); return ret; } @@ -697,16 +709,16 @@ void intel_guc_init(struct drm_device *dev) fw_path = NULL; } else if (IS_SKYLAKE(dev)) { fw_path = I915_SKL_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 6; - guc_fw->guc_fw_minor_wanted = 1; + guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR; } else if (IS_BROXTON(dev)) { fw_path = I915_BXT_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 8; - guc_fw->guc_fw_minor_wanted = 7; + guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR; } else if (IS_KABYLAKE(dev)) { fw_path = I915_KBL_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 9; - guc_fw->guc_fw_minor_wanted = 14; + guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR; } else { fw_path = ""; /* unknown device */ } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 5dc2c20..334d47b 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -477,7 +477,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -void i915_hpd_poll_init_work(struct work_struct *work) { +static void i915_hpd_poll_init_work(struct work_struct *work) +{ struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.poll_init_work); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 309c5d9..6b49df4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, desc = ctx->desc_template; /* bits 3-4 */ desc |= engine->ctx_desc_template; /* bits 0-11 */ - desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; + desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; /* bits 12-31 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ @@ -763,7 +763,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) static int intel_lr_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ctx->i915; struct intel_context *ce = &ctx->engine[engine->id]; void *vaddr; u32 *lrc_reg_state; @@ -774,16 +773,15 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, if (ce->pin_count++) return 0; - ret = i915_gem_object_ggtt_pin(ce->state, NULL, - 0, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, + PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL); if (ret) goto err; - vaddr = i915_gem_object_pin_map(ce->state); + vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - goto unpin_ctx_obj; + goto unpin_vma; } lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -792,24 +790,26 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, if (ret) goto unpin_map; - ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state); intel_lr_context_descriptor_update(ctx, engine); - lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start; + lrc_reg_state[CTX_RING_BUFFER_START+1] = + i915_ggtt_offset(ce->ring->vma); ce->lrc_reg_state = lrc_reg_state; - ce->state->dirty = true; + ce->state->obj->dirty = true; /* Invalidate GuC TLB. 
*/ - if (i915.enable_guc_submission) + if (i915.enable_guc_submission) { + struct drm_i915_private *dev_priv = ctx->i915; I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); + } i915_gem_context_get(ctx); return 0; unpin_map: - i915_gem_object_unpin_map(ce->state); -unpin_ctx_obj: - i915_gem_object_ggtt_unpin(ce->state); + i915_gem_object_unpin_map(ce->state->obj); +unpin_vma: + __i915_vma_unpin(ce->state); err: ce->pin_count = 0; return ret; @@ -828,12 +828,8 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx, intel_ring_unpin(ce->ring); - i915_gem_object_unpin_map(ce->state); - i915_gem_object_ggtt_unpin(ce->state); - - ce->lrc_vma = NULL; - ce->lrc_desc = 0; - ce->lrc_reg_state = NULL; + i915_gem_object_unpin_map(ce->state->obj); + i915_vma_unpin(ce->state); i915_gem_context_put(ctx); } @@ -919,7 +915,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); @@ -937,7 +933,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); wa_ctx_emit(batch, index, 0); return index; @@ -998,7 +994,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ /* Actual scratch location is at 128 bytes offset */ - scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | @@ -1077,8 +1073,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, /* WaClearSlmSpaceAtContextSwitch:kbl */ /* Actual scratch location is at 128 bytes offset */ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { - uint32_t scratch_addr - = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + u32 scratch_addr = + i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | @@ -1170,45 +1166,44 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { - int ret; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; - engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm, - PAGE_ALIGN(size)); - if (IS_ERR(engine->wa_ctx.obj)) { - DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); - ret = PTR_ERR(engine->wa_ctx.obj); - engine->wa_ctx.obj = NULL; - return ret; - } + obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); + if (IS_ERR(obj)) + return PTR_ERR(obj); - ret = i915_gem_object_ggtt_pin(engine->wa_ctx.obj, NULL, - 0, PAGE_SIZE, 0); - if (ret) { - DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", - ret); - i915_gem_object_put(engine->wa_ctx.obj); - return ret; + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; } + err = i915_vma_pin(vma, 
0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err; + + engine->wa_ctx.vma = vma; return 0; + +err: + i915_gem_object_put(obj); + return err; } static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) { - if (engine->wa_ctx.obj) { - i915_gem_object_ggtt_unpin(engine->wa_ctx.obj); - i915_gem_object_put(engine->wa_ctx.obj); - engine->wa_ctx.obj = NULL; - } + i915_vma_unpin_and_release(&engine->wa_ctx.vma); } static int intel_init_workaround_bb(struct intel_engine_cs *engine) { - int ret; + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; uint32_t *batch; uint32_t offset; struct page *page; - struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; + int ret; WARN_ON(engine->id != RCS); @@ -1220,7 +1215,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) } /* some WA perform writes to scratch page, ensure it is valid */ - if (engine->scratch.obj == NULL) { + if (!engine->scratch) { DRM_ERROR("scratch page not allocated for %s\n", engine->name); return -EINVAL; } @@ -1231,7 +1226,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } - page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0); + page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); batch = kmap_atomic(page); offset = 0; @@ -1278,7 +1273,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), - (u32)engine->status_page.gfx_addr); + engine->status_page.ggtt_offset); POSTING_READ(RING_HWS_PGA(engine->mmio_base)); } @@ -1488,7 +1483,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, { struct intel_ring *ring = request->ring; struct intel_engine_cs *engine = request->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 scratch_addr = + i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; bool vf_flush_wa = false, dc_flush_wa = false; u32 flags = 0; int ret; @@ -1700,9 +1696,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) intel_engine_cleanup_common(engine); - if (engine->status_page.obj) { - i915_gem_object_unpin_map(engine->status_page.obj); - engine->status_page.obj = NULL; + if (engine->status_page.vma) { + i915_gem_object_unpin_map(engine->status_page.vma->obj); + engine->status_page.vma = NULL; } intel_lr_context_unpin(dev_priv->kernel_context, engine); @@ -1747,19 +1743,19 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) } static int -lrc_setup_hws(struct intel_engine_cs *engine, - struct drm_i915_gem_object *dctx_obj) +lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) { + const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; void *hws; /* The HWSP is part of the default context object in LRC mode. 
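 *
 * Editor's sketch of the layout assumed here: the per-process HWSP
 * occupies page LRC_PPHWSP_PN of the context image and the register
 * state follows at LRC_STATE_PN, so both addresses derive from the
 * same vma:
 *
 *	status page (GGTT) = i915_ggtt_offset(vma)
 *			     + LRC_PPHWSP_PN * PAGE_SIZE
 *	register state     = CPU map of vma->obj
 *			     + LRC_STATE_PN * PAGE_SIZE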
*/ - engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; - hws = i915_gem_object_pin_map(dctx_obj); + hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(hws)) return PTR_ERR(hws); - engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE; - engine->status_page.obj = dctx_obj; + + engine->status_page.page_addr = hws + hws_offset; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; + engine->status_page.vma = vma; return 0; } @@ -1849,11 +1845,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine) else engine->init_hw = gen8_init_render_ring; engine->init_context = gen8_init_rcs_context; - engine->cleanup = intel_fini_pipe_control; engine->emit_flush = gen8_emit_flush_render; engine->emit_request = gen8_emit_request_render; - ret = intel_init_pipe_control(engine, 4096); + ret = intel_engine_create_scratch(engine, 4096); if (ret) return ret; @@ -1968,7 +1963,7 @@ populate_lr_context(struct i915_gem_context *ctx, return ret; } - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); @@ -2025,9 +2020,9 @@ populate_lr_context(struct i915_gem_context *ctx, RING_INDIRECT_CTX(engine->mmio_base), 0); ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0); - if (engine->wa_ctx.obj) { + if (engine->wa_ctx.vma) { struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); + u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); reg_state[CTX_RCS_INDIRECT_CTX+1] = (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) | @@ -2131,6 +2126,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, { struct drm_i915_gem_object *ctx_obj; struct intel_context *ce = &ctx->engine[engine->id]; + struct i915_vma *vma; uint32_t context_size; struct intel_ring *ring; int ret; @@ -2148,6 +2144,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, return PTR_ERR(ctx_obj); } + vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto error_deref_obj; + } + ring = intel_engine_create_ring(engine, ctx->ring_size); if (IS_ERR(ring)) { ret = PTR_ERR(ring); @@ -2161,7 +2163,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, } ce->ring = ring; - ce->state = ctx_obj; + ce->state = vma; ce->initialised = engine->init_context == NULL; return 0; @@ -2170,8 +2172,6 @@ error_ring_free: intel_ring_free(ring); error_deref_obj: i915_gem_object_put(ctx_obj); - ce->ring = NULL; - ce->state = NULL; return ret; } @@ -2182,24 +2182,23 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, for_each_engine(engine, dev_priv) { struct intel_context *ce = &ctx->engine[engine->id]; - struct drm_i915_gem_object *ctx_obj = ce->state; void *vaddr; uint32_t *reg_state; - if (!ctx_obj) + if (!ce->state) continue; - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); if (WARN_ON(IS_ERR(vaddr))) continue; reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ctx_obj->dirty = true; reg_state[CTX_RING_HEAD+1] = 0; reg_state[CTX_RING_TAIL+1] = 0; - i915_gem_object_unpin_map(ctx_obj); + ce->state->obj->dirty = true; + i915_gem_object_unpin_map(ce->state->obj); ce->ring->head = 0; ce->ring->tail = 0; diff --git 
a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e29f3d1..52d6ed6 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -48,6 +48,20 @@ struct intel_lvds_connector { struct notifier_block lid_notifier; }; +struct intel_lvds_pps { + /* 100us units */ + int t1_t2; + int t3; + int t4; + int t5; + int tx; + + int divider; + + int port; + bool powerdown_on_reset; +}; + struct intel_lvds_encoder { struct intel_encoder base; @@ -55,6 +69,9 @@ struct intel_lvds_encoder { i915_reg_t reg; u32 a3_power; + struct intel_lvds_pps init_pps; + u32 init_lvds_val; + struct intel_lvds_connector *attached_connector; }; @@ -136,6 +153,83 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } +static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_lvds_pps *pps) +{ + u32 val; + + pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET; + + val = I915_READ(PP_ON_DELAYS(0)); + pps->port = (val & PANEL_PORT_SELECT_MASK) >> + PANEL_PORT_SELECT_SHIFT; + pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + val = I915_READ(PP_OFF_DELAYS(0)); + pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + val = I915_READ(PP_DIVISOR(0)); + pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >> + PP_REFERENCE_DIVIDER_SHIFT; + val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT; + /* + * Remove the BSpec specified +1 (100ms) offset that accounts for a + * too short power-cycle delay due to the asynchronous programming of + * the register. 
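 *
 * Worked example (editor's note): a raw PANEL_POWER_CYCLE_DELAY
 * field of 5 means the BIOS programmed T4 + 1, i.e. 500 ms; the
 * readout below yields (5 - 1) = 4 in 100 ms units, and converting
 * gives pps->t4 = 4 * 1000 = 4000 in the 100 us units used for the
 * rest of this struct, i.e. 400 ms.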
+ */ + if (val) + val--; + /* Convert from 100ms to 100us units */ + pps->t4 = val * 1000; + + if (INTEL_INFO(dev_priv)->gen <= 4 && + pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { + DRM_DEBUG_KMS("Panel power timings uninitialized, " + "setting defaults\n"); + /* Set T2 to 40ms and T5 to 200ms in 100 usec units */ + pps->t1_t2 = 40 * 10; + pps->t5 = 200 * 10; + /* Set T3 to 35ms and Tx to 200ms in 100 usec units */ + pps->t3 = 35 * 10; + pps->tx = 200 * 10; + } + + DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d " + "divider %d port %d powerdown_on_reset %d\n", + pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx, + pps->divider, pps->port, pps->powerdown_on_reset); +} + +static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, + struct intel_lvds_pps *pps) +{ + u32 val; + + val = I915_READ(PP_CONTROL(0)); + WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); + if (pps->powerdown_on_reset) + val |= PANEL_POWER_RESET; + I915_WRITE(PP_CONTROL(0), val); + + I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) | + (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) | + (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT)); + I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) | + (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT)); + + val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT; + val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) << + PANEL_POWER_CYCLE_DELAY_SHIFT; + I915_WRITE(PP_DIVISOR(0), val); +} + static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); @@ -154,7 +248,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) assert_pll_disabled(dev_priv, pipe); } - temp = I915_READ(lvds_encoder->reg); + intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps); + + temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (HAS_PCH_CPT(dev)) { @@ -217,21 +313,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder) struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; struct drm_i915_private *dev_priv = to_i915(dev); - i915_reg_t ctl_reg, stat_reg; - - if (HAS_PCH_SPLIT(dev)) { - ctl_reg = PCH_PP_CONTROL; - stat_reg = PCH_PP_STATUS; - } else { - ctl_reg = PP_CONTROL; - stat_reg = PP_STATUS; - } I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000)) + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(intel_connector); @@ -242,18 +329,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(dev); - i915_reg_t ctl_reg, stat_reg; - - if (HAS_PCH_SPLIT(dev)) { - ctl_reg = PCH_PP_CONTROL; - stat_reg = PCH_PP_STATUS; - } else { - ctl_reg = PP_CONTROL; - stat_reg = PP_STATUS; - } - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000)) + I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON); + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000)) 
DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -900,17 +978,6 @@ void intel_lvds_init(struct drm_device *dev) int pipe; u8 pin; - /* - * Unlock registers and just leave them unlocked. Do this before - * checking quirk lists to avoid bogus WARNINGs. - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_CONTROL, - I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); - } else if (INTEL_INFO(dev_priv)->gen < 5) { - I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); - } if (!intel_lvds_supported(dev)) return; @@ -943,18 +1010,6 @@ void intel_lvds_init(struct drm_device *dev) DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); } - /* Set the Panel Power On/Off timings if uninitialized. */ - if (INTEL_INFO(dev_priv)->gen < 5 && - I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { - /* Set T2 to 40ms and T5 to 200ms */ - I915_WRITE(PP_ON_DELAYS, 0x019007d0); - - /* Set T3 to 35ms and Tx to 200ms */ - I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); - - DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n"); - } - lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); if (!lvds_encoder) return; @@ -1020,6 +1075,10 @@ void intel_lvds_init(struct drm_device *dev) dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + + intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); + lvds_encoder->init_lvds_val = lvds; + /* * LVDS discovery: * 1) check for EDID on DDC diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 90f3ab4..a24bc8c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -171,8 +171,8 @@ struct overlay_registers { struct intel_overlay { struct drm_i915_private *i915; struct intel_crtc *crtc; - struct drm_i915_gem_object *vid_bo; - struct drm_i915_gem_object *old_vid_bo; + struct i915_vma *vma; + struct i915_vma *old_vma; bool active; bool pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_wc(dev_priv->ggtt.mappable, + regs = io_mapping_map_wc(&dev_priv->ggtt.mappable, overlay->flip_addr, PAGE_SIZE); @@ -317,15 +317,17 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, { struct intel_overlay *overlay = container_of(active, typeof(*overlay), last_flip); - struct drm_i915_gem_object *obj = overlay->old_vid_bo; + struct i915_vma *vma; - i915_gem_track_fb(obj, NULL, - INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); + vma = fetch_and_zero(&overlay->old_vma); + if (WARN_ON(!vma)) + return; - i915_gem_object_ggtt_unpin(obj); - i915_gem_object_put(obj); + i915_gem_track_fb(vma->obj, NULL, + INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); - overlay->old_vid_bo = NULL; + i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); } static void intel_overlay_off_tail(struct i915_gem_active *active, @@ -333,15 +335,15 @@ static void intel_overlay_off_tail(struct i915_gem_active *active, { struct intel_overlay *overlay = container_of(active, typeof(*overlay), last_flip); - struct drm_i915_gem_object *obj = overlay->vid_bo; + struct i915_vma *vma; /* never have the overlay hw on without showing a frame */ - if 
(WARN_ON(!obj)) + vma = fetch_and_zero(&overlay->vma); + if (WARN_ON(!vma)) return; - i915_gem_object_ggtt_unpin(obj); - i915_gem_object_put(obj); - overlay->vid_bo = NULL; + i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); overlay->crtc->overlay = NULL; overlay->crtc = NULL; @@ -421,7 +423,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) /* Only wait if there is actually an old frame to release to * guarantee forward progress. */ - if (!overlay->old_vid_bo) + if (!overlay->old_vma) return 0; if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { @@ -744,6 +746,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; + struct i915_vma *vma; lockdep_assert_held(&dev_priv->drm.struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); @@ -752,12 +755,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin_to_display_plane(new_bo, 0, + vma = i915_gem_object_pin_to_display_plane(new_bo, 0, &i915_ggtt_view_normal); - if (ret != 0) - return ret; + if (IS_ERR(vma)) + return PTR_ERR(vma); - ret = i915_gem_object_put_fence(new_bo); + ret = i915_vma_put_fence(vma); if (ret) goto out_unpin; @@ -798,7 +801,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, swidth = params->src_w; swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); + iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y); ostride = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -812,8 +815,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U); - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V); + iowrite32(i915_ggtt_offset(vma) + params->offset_U, + &regs->OBUF_0U); + iowrite32(i915_ggtt_offset(vma) + params->offset_V, + &regs->OBUF_0V); ostride |= params->stride_UV << 16; } @@ -834,18 +839,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret) goto out_unpin; - i915_gem_track_fb(overlay->vid_bo, new_bo, + i915_gem_track_fb(overlay->vma->obj, new_bo, INTEL_FRONTBUFFER_OVERLAY(pipe)); - overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = new_bo; + overlay->old_vma = overlay->vma; + overlay->vma = vma; intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; out_unpin: - i915_gem_object_ggtt_unpin(new_bo); + i915_gem_object_unpin_from_display_plane(vma); return ret; } @@ -1368,6 +1373,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; + struct i915_vma *vma = NULL; int ret; if (!HAS_OVERLAY(dev_priv)) return; @@ -1401,13 +1407,14 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) } overlay->flip_addr = reg_bo->phys_handle->busaddr; } else { - ret = i915_gem_object_ggtt_pin(reg_bo, NULL, + vma = i915_gem_object_ggtt_pin(reg_bo, NULL, 0, PAGE_SIZE, PIN_MAPPABLE); - if (ret) { + if (IS_ERR(vma)) { DRM_ERROR("failed to pin overlay register bo\n"); + ret =
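intel_overlay_do_put_image() above is typical of the API change running through this series: the pin functions now return the VMA itself, with failures encoded in the pointer rather than a separate int. A standalone sketch of the calling convention (the pin call is the one from the patch; the wrapper is hypothetical):

#include <linux/err.h>

static int pin_overlay_image(struct drm_i915_gem_object *new_bo)
{
	struct i915_vma *vma;

	vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
						   &i915_ggtt_view_normal);
	if (IS_ERR(vma))
		return PTR_ERR(vma);	/* decode errno from the pointer */

	/* on success the vma is pinned; callers read i915_ggtt_offset(vma)
	 * and drop the pin with i915_gem_object_unpin_from_display_plane()
	 */
	i915_gem_object_unpin_from_display_plane(vma);
	return 0;
}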
PTR_ERR(vma); goto out_free_bo; } - overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo); + overlay->flip_addr = i915_ggtt_offset(vma); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1439,8 +1446,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) - i915_gem_object_ggtt_unpin(reg_bo); + if (vma) + i915_vma_unpin(vma); out_free_bo: i915_gem_object_put(reg_bo); out_free: @@ -1482,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, + regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable, overlay->flip_addr); return regs; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 81ab119..8a6751e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3115,8 +3115,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; } - WARN_ON(cstate->plane_mask && total_data_rate == 0); - return total_data_rate; } @@ -3920,9 +3918,24 @@ skl_compute_ddb(struct drm_atomic_state *state) * pretend that all pipes switched active status so that we'll * ensure a full DDB recompute. */ - if (dev_priv->wm.distrust_bios_wm) + if (dev_priv->wm.distrust_bios_wm) { + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + return ret; + intel_state->active_pipe_changes = ~0; + /* + * We usually only initialize intel_state->active_crtcs if + * we're doing a modeset; make sure this field is always + * initialized during the sanitization process that happens + * on the first commit too.
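Note that the distrust_bios_wm branch above takes connection_mutex through the atomic state's acquire context rather than with a bare lock call, so contention feeds into the normal deadlock backoff-and-retry of the atomic commit path. The shape of the pattern in isolation (the wrapper name is hypothetical):

static int lock_connectors_for_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	int ret;

	/* tracked by the acquire ctx: on contention the caller sees
	 * -EDEADLK, and the atomic core drops every held lock and
	 * retries the whole check phase
	 */
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	return 0;
}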
+ */ + if (!intel_state->modeset) + intel_state->active_crtcs = dev_priv->active_crtcs; + } + /* * If the modeset changes which CRTC's are active, we need to * recompute the DDB allocation for *all* active pipes, even @@ -5675,8 +5688,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev_priv->drm.struct_mutex); - pcbr = I915_READ(VLV_PCBR); if (pcbr) { /* BIOS set it up already, grab the pre-alloc'd space */ @@ -5712,7 +5723,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev_priv->drm.struct_mutex); } static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) @@ -6488,6 +6498,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) intel_runtime_pm_get(dev_priv); } + mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->rps.hw_lock); /* Initialize RPS limits (for userspace) */ @@ -6529,6 +6540,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) dev_priv->rps.boost_freq = dev_priv->rps.max_freq; mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->drm.struct_mutex); intel_autoenable_gt_powersave(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e08a1e1..cc5bcd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -176,7 +176,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) { struct intel_ring *ring = req->ring; u32 scratch_addr = - req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; int ret; ret = intel_ring_begin(req, 6); @@ -212,7 +212,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { struct intel_ring *ring = req->ring; u32 scratch_addr = - req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; int ret; @@ -286,7 +286,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { struct intel_ring *ring = req->ring; u32 scratch_addr = - req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; int ret; @@ -370,7 +370,8 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req, static int gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 scratch_addr = + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; int ret; @@ -466,7 +467,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = RING_HWS_PGA(engine->mmio_base); } - I915_WRITE(mmio, (u32)engine->status_page.gfx_addr); + I915_WRITE(mmio, engine->status_page.ggtt_offset); POSTING_READ(mmio); /* @@ -497,7 +498,7 @@ static bool stop_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(dev_priv)) { + if (INTEL_GEN(dev_priv) > 2) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); if (intel_wait_for_register(dev_priv, RING_MI_MODE(engine->mmio_base), @@ -519,7 +520,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); I915_WRITE_TAIL(engine, 0); - if (!IS_GEN2(dev_priv)) { + if (INTEL_GEN(dev_priv) > 2) { (void)I915_READ_CTL(engine); 
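The STOP_RING writes in stop_ring() above use the hardware's masked-bit register convention: the upper 16 bits of the write select which of the lower 16 bits take effect, so no read-modify-write is needed. The macros expand roughly like this (simplified from i915_reg.h; the bit position shown is illustrative):

#include <linux/types.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* select + set */
#define MASKED_BIT_DISABLE(a)	((a) << 16)		/* select + clear */

/* value to write to MI_MODE to stop or restart a ring without
 * disturbing any of the register's other bits
 */
static u32 mi_mode_stop_ring(bool stop)
{
	const u32 stop_ring_bit = 1 << 8;	/* illustrative position */

	return stop ? MASKED_BIT_ENABLE(stop_ring_bit)
		    : MASKED_BIT_DISABLE(stop_ring_bit);
}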
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -531,7 +532,6 @@ static int init_ring_common(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; struct intel_ring *ring = engine->buffer; - struct drm_i915_gem_object *obj = ring->obj; int ret = 0; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -571,7 +571,7 @@ static int init_ring_common(struct intel_engine_cs *engine) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. */ - I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj)); + I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); /* WaClearRingBufHeadRegAtInit:ctg,elk */ if (I915_READ_HEAD(engine)) @@ -586,16 +586,16 @@ static int init_ring_common(struct intel_engine_cs *engine) /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 && - I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) && + I915_READ_START(engine) == i915_ggtt_offset(ring->vma) && (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " - "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", + "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08x]\n", engine->name, I915_READ_CTL(engine), I915_READ_CTL(engine) & RING_VALID, I915_READ_HEAD(engine), I915_READ_TAIL(engine), I915_READ_START(engine), - (unsigned long)i915_gem_obj_ggtt_offset(obj)); + i915_ggtt_offset(ring->vma)); ret = -EIO; goto out; } @@ -613,48 +613,6 @@ out: return ret; } -void intel_fini_pipe_control(struct intel_engine_cs *engine) -{ - if (engine->scratch.obj == NULL) - return; - - i915_gem_object_ggtt_unpin(engine->scratch.obj); - i915_gem_object_put(engine->scratch.obj); - engine->scratch.obj = NULL; -} - -int intel_init_pipe_control(struct intel_engine_cs *engine, int size) -{ - struct drm_i915_gem_object *obj; - int ret; - - WARN_ON(engine->scratch.obj); - - obj = i915_gem_object_create_stolen(&engine->i915->drm, size); - if (!obj) - obj = i915_gem_object_create(&engine->i915->drm, size); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate scratch page\n"); - ret = PTR_ERR(obj); - goto err; - } - - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH); - if (ret) - goto err_unref; - - engine->scratch.obj = obj; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); - DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", - engine->name, engine->scratch.gtt_offset); - return 0; - -err_unref: - i915_gem_object_put(engine->scratch.obj); -err: - return ret; -} - static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { struct intel_ring *ring = req->ring; @@ -1300,13 +1258,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (dev_priv->semaphore_obj) { - i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); - i915_gem_object_put(dev_priv->semaphore_obj); - dev_priv->semaphore_obj = NULL; - } - - intel_fini_pipe_control(engine); + i915_vma_unpin_and_release(&dev_priv->semaphore); } static int gen8_rcs_signal(struct drm_i915_gem_request *req) @@ -1317,7 +1269,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req) enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); + num_rings = INTEL_INFO(dev_priv)->num_rings; ret = intel_ring_begin(req, (num_rings-1) * 8); if (ret) return ret; @@ -1354,7 +1306,7 @@ static 
int gen8_xcs_signal(struct drm_i915_gem_request *req) enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); + num_rings = INTEL_INFO(dev_priv)->num_rings; ret = intel_ring_begin(req, (num_rings-1) * 6); if (ret) return ret; @@ -1385,18 +1337,21 @@ static int gen6_signal(struct drm_i915_gem_request *req) { struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; - struct intel_engine_cs *useless; - enum intel_engine_id id; + struct intel_engine_cs *engine; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); + num_rings = INTEL_INFO(dev_priv)->num_rings; ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2)); if (ret) return ret; - for_each_engine_id(useless, dev_priv, id) { - i915_reg_t mbox_reg = req->engine->semaphore.mbox.signal[id]; + for_each_engine(engine, dev_priv) { + i915_reg_t mbox_reg; + + if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) + continue; + mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; if (i915_mmio_reg_valid(mbox_reg)) { intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit_reg(ring, mbox_reg); @@ -1543,7 +1498,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req, u32 dw1 = MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; - u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->id]; + u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; int ret; WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); @@ -1764,7 +1719,7 @@ i830_emit_bb_start(struct drm_i915_gem_request *req, unsigned int dispatch_flags) { struct intel_ring *ring = req->ring; - u32 cs_offset = req->engine->scratch.gtt_offset; + u32 cs_offset = i915_ggtt_offset(req->engine->scratch); int ret; ret = intel_ring_begin(req, 6); @@ -1853,79 +1808,79 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs *engine) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = engine->status_page.obj; - if (obj == NULL) + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) return; - kunmap(sg_page(obj->pages->sgl)); - i915_gem_object_ggtt_unpin(obj); - i915_gem_object_put(obj); - engine->status_page.obj = NULL; + i915_vma_unpin(vma); + i915_gem_object_unpin_map(vma->obj); + i915_vma_put(vma); } static int init_status_page(struct intel_engine_cs *engine) { - struct drm_i915_gem_object *obj = engine->status_page.obj; - - if (obj == NULL) { - unsigned flags; - int ret; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags; + int ret; - obj = i915_gem_object_create(&engine->i915->drm, 4096); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); - return PTR_ERR(obj); - } + obj = i915_gem_object_create(&engine->i915->drm, 4096); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate status page\n"); + return PTR_ERR(obj); + } - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err_unref; - - flags = 0; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actualy map it). 
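gen6_signal() above switches from iterating every engine blindly to filtering on the hardware id, because gen6/7 parts only have mailbox registers for the first four engines (RCS/VCS/BCS/VECS) and VCS2 must be skipped. The predicate in isolation, using the mask the intel_ringbuffer.h changes later in this patch introduce:

#include <linux/bitops.h>

/* GEN6_SEMAPHORES_MASK = GENMASK(GEN6_SEMAPHORE_LAST, 0) = 0xf with
 * GEN6_SEMAPHORE_LAST == VECS_HW, per the header changes below
 */
static bool engine_has_gen6_mbox(const struct intel_engine_cs *engine)
{
	return BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK;
}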
- */ - flags |= PIN_MAPPABLE; - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags); - if (ret) { -err_unref: - i915_gem_object_put(obj); - return ret; - } + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + if (ret) + goto err; - engine->status_page.obj = obj; + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; } - engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); - engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); - memset(engine->status_page.page_addr, 0, PAGE_SIZE); + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). + */ + flags |= PIN_MAPPABLE; + ret = i915_vma_pin(vma, 0, 4096, flags); + if (ret) + goto err; - DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - engine->name, engine->status_page.gfx_addr); + engine->status_page.vma = vma; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma); + engine->status_page.page_addr = + i915_gem_object_pin_map(obj, I915_MAP_WB); + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); return 0; + +err: + i915_gem_object_put(obj); + return ret; } static int init_phys_status_page(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (!dev_priv->status_page_dmah) { - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - } + dev_priv->status_page_dmah = + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; memset(engine->status_page.page_addr, 0, PAGE_SIZE); @@ -1935,55 +1890,46 @@ static int init_phys_status_page(struct intel_engine_cs *engine) int intel_ring_pin(struct intel_ring *ring) { - struct drm_i915_private *dev_priv = ring->engine->i915; - struct drm_i915_gem_object *obj = ring->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ - unsigned flags = PIN_OFFSET_BIAS | 4096; + unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096; + enum i915_map_type map; + struct i915_vma *vma = ring->vma; void *addr; int ret; - if (HAS_LLC(dev_priv) && !obj->stolen) { - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags); - if (ret) - return ret; + GEM_BUG_ON(ring->vaddr); - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) - goto err_unpin; + map = HAS_LLC(ring->engine->i915) ?
I915_MAP_WB : I915_MAP_WC; - addr = i915_gem_object_pin_map(obj); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_unpin; - } - } else { - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, - flags | PIN_MAPPABLE); - if (ret) - return ret; + if (vma->obj->stolen) + flags |= PIN_MAPPABLE; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto err_unpin; + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { + if (flags & PIN_MAPPABLE || map == I915_MAP_WC) + ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); + else + ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); + if (unlikely(ret)) + return ret; + } - /* Access through the GTT requires the device to be awake. */ - assert_rpm_wakelock_held(dev_priv); + ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); + if (unlikely(ret)) + return ret; - addr = (void __force *) - i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_unpin; - } - } + if (i915_vma_is_map_and_fenceable(vma)) + addr = (void __force *)i915_vma_pin_iomap(vma); + else + addr = i915_gem_object_pin_map(vma->obj, map); + if (IS_ERR(addr)) + goto err; ring->vaddr = addr; - ring->vma = i915_gem_obj_to_ggtt(obj); return 0; -err_unpin: - i915_gem_object_ggtt_unpin(obj); - return ret; +err: + i915_vma_unpin(vma); + return PTR_ERR(addr); } void intel_ring_unpin(struct intel_ring *ring) @@ -1991,60 +1937,54 @@ void intel_ring_unpin(struct intel_ring *ring) GEM_BUG_ON(!ring->vma); GEM_BUG_ON(!ring->vaddr); - if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen) - i915_gem_object_unpin_map(ring->obj); - else + if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); + else + i915_gem_object_unpin_map(ring->vma->obj); ring->vaddr = NULL; - i915_gem_object_ggtt_unpin(ring->obj); - ring->vma = NULL; + i915_vma_unpin(ring->vma); } -static void intel_destroy_ringbuffer_obj(struct intel_ring *ring) -{ - i915_gem_object_put(ring->obj); - ring->obj = NULL; -} - -static int intel_alloc_ringbuffer_obj(struct drm_device *dev, - struct intel_ring *ring) +static struct i915_vma * +intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = NULL; - if (!HAS_LLC(dev)) - obj = i915_gem_object_create_stolen(dev, ring->size); - if (obj == NULL) - obj = i915_gem_object_create(dev, ring->size); + obj = i915_gem_object_create_stolen(&dev_priv->drm, size); + if (!obj) + obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) - return PTR_ERR(obj); + return ERR_CAST(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; - ring->obj = obj; + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err; + + return vma; - return 0; +err: + i915_gem_object_put(obj); + return vma; } struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size) { struct intel_ring *ring; - int ret; + struct i915_vma *vma; GEM_BUG_ON(!is_power_of_2(size)); ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (ring == NULL) { - DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", - engine->name); + if (!ring) return ERR_PTR(-ENOMEM); - } ring->engine = engine; - list_add(&ring->link, &engine->buffers); INIT_LIST_HEAD(&ring->request_list); @@ -2060,22 +2000,21 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); - if (ret) { - 
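intel_ring_pin() above now has two mapping strategies: ring buffers in stolen memory have no struct pages and must be reached through the mappable GTT aperture, while ordinary shmem-backed rings get a kernel mapping with write-combining (or write-back on LLC parts). The decision, condensed into one helper (a paraphrase of the patch, not its text):

static void *map_ring(struct i915_vma *vma, bool has_llc)
{
	enum i915_map_type map = has_llc ? I915_MAP_WB : I915_MAP_WC;

	if (i915_vma_is_map_and_fenceable(vma))
		/* stolen or aperture-bound: go through the GTT mapping */
		return (void __force *)i915_vma_pin_iomap(vma);

	/* shmem pages: direct kernel mapping with the chosen caching mode */
	return i915_gem_object_pin_map(vma->obj, map);
}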
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", - engine->name, ret); - list_del(&ring->link); + vma = intel_ring_create_vma(engine->i915, size); + if (IS_ERR(vma)) { kfree(ring); - return ERR_PTR(ret); + return ERR_CAST(vma); } + ring->vma = vma; + list_add(&ring->link, &engine->buffers); return ring; } void intel_ring_free(struct intel_ring *ring) { - intel_destroy_ringbuffer_obj(ring); + i915_vma_put(ring->vma); list_del(&ring->link); kfree(ring); } @@ -2092,8 +2031,12 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, return 0; if (ce->state) { - ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0, - ctx->ggtt_alignment, 0); + ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false); + if (ret) + goto error; + + ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment, + PIN_GLOBAL | PIN_HIGH); if (ret) goto error; } @@ -2127,7 +2070,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx, return; if (ce->state) - i915_gem_object_ggtt_unpin(ce->state); + i915_vma_unpin(ce->state); i915_gem_context_put(ctx); } @@ -2165,7 +2108,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) ret = PTR_ERR(ring); goto error; } - engine->buffer = ring; if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); @@ -2180,11 +2122,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) ret = intel_ring_pin(ring); if (ret) { - DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", - engine->name, ret); - intel_destroy_ringbuffer_obj(ring); + intel_ring_free(ring); goto error; } + engine->buffer = ring; return 0; @@ -2203,7 +2144,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) dev_priv = engine->i915; if (engine->buffer) { - WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(INTEL_GEN(dev_priv) > 2 && + (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_ring_unpin(engine->buffer); intel_ring_free(engine->buffer); @@ -2371,50 +2313,6 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) return 0; } -void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - struct drm_i915_private *dev_priv = engine->i915; - - /* Our semaphore implementation is strictly monotonic (i.e. we proceed - * so long as the semaphore value in the register/page is greater - * than the sync value), so whenever we reset the seqno, - * so long as we reset the tracking semaphore value to 0, it will - * always be before the next request's seqno. If we don't reset - * the semaphore value, then when the seqno moves backwards all - * future waits will complete instantly (causing rendering corruption). 
- */ - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { - I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); - I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); - if (HAS_VEBOX(dev_priv)) - I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); - } - if (dev_priv->semaphore_obj) { - struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; - struct page *page = i915_gem_object_get_dirty_page(obj, 0); - void *semaphores = kmap(page); - memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), - 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); - kunmap(page); - } - memset(engine->semaphore.sync_seqno, 0, - sizeof(engine->semaphore.sync_seqno)); - - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - engine->last_submitted_seqno = seqno; - - engine->hangcheck.seqno = seqno; - - /* After manually advancing the seqno, fake the interrupt in case - * there are any waiters for that seqno. - */ - rcu_read_lock(); - intel_engine_wakeup(engine); - rcu_read_unlock(); -} - static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; @@ -2624,35 +2522,36 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, if (!i915.semaphores) return; - if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { + if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { + struct i915_vma *vma; + obj = i915_gem_object_create(&dev_priv->drm, 4096); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (ret != 0) { - i915_gem_object_put(obj); - DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - dev_priv->semaphore_obj = obj; - } - } - } + if (IS_ERR(obj)) + goto err; - if (!i915.semaphores) - return; + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err_obj; + + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + goto err_obj; + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_obj; + + dev_priv->semaphore = vma; + } if (INTEL_GEN(dev_priv) >= 8) { - u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); + u32 offset = i915_ggtt_offset(dev_priv->semaphore); engine->semaphore.sync_to = gen8_ring_sync_to; engine->semaphore.signal = gen8_xcs_signal; for (i = 0; i < I915_NUM_ENGINES; i++) { - u64 ring_offset; + u32 ring_offset; if (i != engine->id) ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); @@ -2672,47 +2571,55 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, * initialized as INVALID. Gen8 will initialize the * sema between VCS2 and RCS later. 
*/ - for (i = 0; i < I915_NUM_ENGINES; i++) { + for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { static const struct { u32 wait_mbox; i915_reg_t mbox_reg; - } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { - [RCS] = { - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, + } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { + [RCS_HW] = { + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, }, - [VCS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, + [VCS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, }, - [BCS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, + [BCS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, }, - [VECS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, + [VECS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, }, }; u32 wait_mbox; i915_reg_t mbox_reg; - if (i == engine->id || i == VCS2) { + if (i == engine->hw_id) { wait_mbox = MI_SEMAPHORE_SYNC_INVALID; mbox_reg = GEN6_NOSYNC; } else { - wait_mbox = sem_data[engine->id][i].wait_mbox; - mbox_reg = sem_data[engine->id][i].mbox_reg; + wait_mbox = sem_data[engine->hw_id][i].wait_mbox; + mbox_reg = sem_data[engine->hw_id][i].mbox_reg; } engine->semaphore.mbox.wait[i] = wait_mbox; engine->semaphore.mbox.signal[i] = mbox_reg; } } + + return; + +err_obj: + i915_gem_object_put(obj); +err: + DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); + i915.semaphores = 0; } static void intel_ring_init_irq(struct drm_i915_private *dev_priv, @@ -2808,11 +2715,11 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) return ret; if (INTEL_GEN(dev_priv) >= 6) { - ret = intel_init_pipe_control(engine, 4096); + ret = intel_engine_create_scratch(engine, 4096); if (ret) return ret; } else if (HAS_BROKEN_CS_TLB(dev_priv)) { - ret = intel_init_pipe_control(engine, I830_WA_SIZE); + ret = intel_engine_create_scratch(engine, I830_WA_SIZE); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 43e545e..84aea54 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ 
b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -26,10 +26,10 @@ */ #define I915_RING_FREE_SPACE 64 -struct intel_hw_status_page { - u32 *page_addr; - unsigned int gfx_addr; - struct drm_i915_gem_object *obj; +struct intel_hw_status_page { + struct i915_vma *vma; + u32 *page_addr; + u32 ggtt_offset; }; #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base)) @@ -57,10 +57,10 @@ struct intel_hw_status_page { #define GEN8_SEMAPHORE_OFFSET(__from, __to) \ (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size) #define GEN8_SIGNAL_OFFSET(__ring, to) \ - (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + (dev_priv->semaphore->node.start + \ GEN8_SEMAPHORE_OFFSET((__ring)->id, (to))) #define GEN8_WAIT_OFFSET(__ring, from) \ - (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + (dev_priv->semaphore->node.start + \ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) enum intel_engine_hangcheck_action { @@ -75,7 +75,6 @@ enum intel_engine_hangcheck_action { struct intel_engine_hangcheck { u64 acthd; - unsigned long user_interrupts; u32 seqno; int score; enum intel_engine_hangcheck_action action; @@ -84,9 +83,8 @@ struct intel_engine_hangcheck { }; struct intel_ring { - struct drm_i915_gem_object *obj; - void *vaddr; struct i915_vma *vma; + void *vaddr; struct intel_engine_cs *engine; struct list_head link; @@ -124,12 +122,12 @@ struct drm_i915_reg_table; * an option for future use. * size: size of the batch in DWORDS */ -struct i915_ctx_workarounds { +struct i915_ctx_workarounds { struct i915_wa_ctx_bb { u32 offset; u32 size; } indirect_ctx, per_ctx; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; }; struct drm_i915_gem_request; @@ -147,8 +145,14 @@ struct intel_engine_cs { #define I915_NUM_ENGINES 5 #define _VCS(n) (VCS + (n)) unsigned int exec_id; - unsigned int hw_id; - unsigned int guc_id; /* XXX same as hw_id? */ + enum intel_engine_hw_id { + RCS_HW = 0, + VCS_HW, + BCS_HW, + VECS_HW, + VCS2_HW + } hw_id; + enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */ u64 fence_context; u32 mmio_base; unsigned int irq_shift; @@ -172,8 +176,7 @@ struct intel_engine_cs { * the overhead of waking that client is much preferred. 
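The GEN8 semaphore page is a flat table with one slot per (signaller, waiter) engine pair; the GEN8_SEMAPHORE_OFFSET()/GEN8_SIGNAL_OFFSET()/GEN8_WAIT_OFFSET() macros above are plain row-major indexing from the VMA's start. Spelled out, assuming one u64 seqno per slot (the value gen8_semaphore_seqno_size carries):

static u64 gen8_semaphore_addr(u64 page_start,
			       unsigned int from, unsigned int to)
{
	const unsigned int slot = sizeof(u64);	/* gen8_semaphore_seqno_size */

	/* row-major: engine `from`'s outgoing slots are contiguous */
	return page_start + (from * I915_NUM_ENGINES + to) * slot;
}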
*/ struct intel_breadcrumbs { - struct task_struct *irq_seqno_bh; /* bh for user interrupts */ - unsigned long irq_wakeups; + struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */ bool irq_posted; spinlock_t lock; /* protects the lists of requests */ @@ -183,6 +186,9 @@ struct intel_engine_cs { struct task_struct *signaler; /* used for fence signalling */ struct drm_i915_gem_request *first_signal; struct timer_list fake_irq; /* used after a missed interrupt */ + struct timer_list hangcheck; /* detect missed interrupts */ + + unsigned long timeout; bool irq_enabled : 1; bool rpm_wakelock : 1; @@ -197,6 +203,7 @@ struct intel_engine_cs { struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; + struct i915_vma *scratch; u32 irq_keep_mask; /* always keep these interrupts */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ @@ -270,11 +277,14 @@ struct intel_engine_cs { u32 sync_seqno[I915_NUM_ENGINES-1]; union { +#define GEN6_SEMAPHORE_LAST VECS_HW +#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1) +#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0) struct { /* our mbox written by others */ - u32 wait[I915_NUM_ENGINES]; + u32 wait[GEN6_NUM_SEMAPHORES]; /* mboxes this ring signals to */ - i915_reg_t signal[I915_NUM_ENGINES]; + i915_reg_t signal[GEN6_NUM_SEMAPHORES]; } mbox; u64 signal_ggtt[I915_NUM_ENGINES]; }; @@ -310,7 +320,7 @@ struct intel_engine_cs { /* An RCU guarded pointer to the last request. No reference is * held to the request, users must carefully acquire a reference to - * the request using i915_gem_active_get_request_rcu(), or hold the + * the request using i915_gem_active_get_rcu(), or hold the * struct_mutex. */ struct i915_gem_active last_request; @@ -319,11 +329,6 @@ struct intel_engine_cs { struct intel_engine_hangcheck hangcheck; - struct { - struct drm_i915_gem_object *obj; - u32 gtt_offset; - } scratch; - bool needs_cmd_parser; /* @@ -475,11 +480,9 @@ void intel_ring_update_space(struct intel_ring *ring); void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno); -int intel_init_pipe_control(struct intel_engine_cs *engine, int size); -void intel_fini_pipe_control(struct intel_engine_cs *engine); - void intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); +int intel_engine_create_scratch(struct intel_engine_cs *engine, int size); void intel_engine_cleanup_common(struct intel_engine_cs *engine); static inline int intel_engine_idle(struct intel_engine_cs *engine, @@ -515,7 +518,7 @@ int init_workarounds_ring(struct intel_engine_cs *engine); static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) { - return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; + return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR; } /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ @@ -538,29 +541,35 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, struct intel_wait *wait); void intel_engine_enable_signaling(struct drm_i915_gem_request *request); -static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine) +static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) { - return READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh); } -static inline bool intel_engine_wakeup(struct intel_engine_cs *engine) +static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine) { bool wakeup = false; - 
struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + /* Note that for this not to dangerously chase a dangling pointer, - * the caller is responsible for ensure that the task remain valid for - * wake_up_process() i.e. that the RCU grace period cannot expire. + * we must hold the rcu_read_lock here. * * Also note that tsk is likely to be in !TASK_RUNNING state so an * early test for tsk->state != TASK_RUNNING before wake_up_process() * is unlikely to be beneficial. */ - if (tsk) - wakeup = wake_up_process(tsk); + if (intel_engine_has_waiter(engine)) { + struct task_struct *tsk; + + rcu_read_lock(); + tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh); + if (tsk) + wakeup = wake_up_process(tsk); + rcu_read_unlock(); + } + return wakeup; } -void intel_engine_enable_fake_irq(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); unsigned int intel_kick_waiters(struct drm_i915_private *i915); unsigned int intel_kick_signalers(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1c603bb..a1d73c2 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -592,6 +592,8 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Disabling DC9\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + intel_pps_unlock_regs_wa(dev_priv); } static void assert_csr_loaded(struct drm_i915_private *dev_priv) @@ -854,7 +856,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum skl_disp_power_wells power_well_id = power_well->data; - struct i915_power_well *cmn_a_well; + struct i915_power_well *cmn_a_well = NULL; if (power_well_id == BXT_DPIO_CMN_BC) { /* @@ -867,7 +869,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); - if (power_well_id == BXT_DPIO_CMN_BC) + if (cmn_a_well) intel_power_well_put(dev_priv, cmn_a_well); } @@ -1121,6 +1123,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) } i915_redisable_vga_power_on(&dev_priv->drm); + + intel_pps_unlock_regs_wa(dev_priv); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index cbdca7e4..366900d 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -203,21 +203,19 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; - u32 plane_ctl, stride_div, stride; + u32 plane_ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr; - u32 tile_height, plane_offset, plane_size; + u32 surf_addr = plane_state->main.offset; unsigned int rotation = plane_state->base.rotation; - int x_offset, y_offset; + u32 stride = skl_plane_stride(fb, 0, rotation); int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->base.src.x1 >> 16; - uint32_t y = 
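intel_engine_wakeup() above moves the task lookup under rcu_read_lock(): the waiter may exit between loading the pointer and calling wake_up_process(), and only an RCU read-side section keeps the task_struct from being freed underneath us (the deleted comment had pushed that duty onto every caller). The pattern on its own:

#include <linux/rcupdate.h>
#include <linux/sched.h>

static bool wake_rcu_task(struct task_struct __rcu **slot)
{
	struct task_struct *tsk;
	bool woken = false;

	rcu_read_lock();
	tsk = rcu_dereference(*slot);	/* valid for this read-side section */
	if (tsk)
		woken = wake_up_process(tsk);
	rcu_read_unlock();

	return woken;
}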
plane_state->base.src.y1 >> 16; + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; @@ -230,15 +228,6 @@ skl_update_plane(struct drm_plane *drm_plane, plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (key->flags) { I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); @@ -250,28 +239,15 @@ skl_update_plane(struct drm_plane *drm_plane, else if (key->flags & I915_SET_COLORKEY_SOURCE) plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; - surf_addr = intel_plane_obj_offset(intel_plane, obj, 0); - - if (intel_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - - /* stride: Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); - stride = DIV_ROUND_UP(fb->height, tile_height); - plane_size = (src_w << 16) | src_h; - x_offset = stride * tile_height - y - (src_h + 1); - y_offset = x; - } else { - stride = fb->pitches[0] / stride_div; - plane_size = (src_h << 16) | src_w; - x_offset = x; - y_offset = y; - } - plane_offset = y_offset << 16 | x_offset; + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; - I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset); + I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); I915_WRITE(PLANE_STRIDE(pipe, plane), stride); - I915_WRITE(PLANE_SIZE(pipe, plane), plane_size); + I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w); /* program plane scaler */ if (plane_state->scaler_id >= 0) { @@ -296,7 +272,8 @@ skl_update_plane(struct drm_plane *drm_plane, } I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); - I915_WRITE(PLANE_SURF(pipe, plane), surf_addr); + I915_WRITE(PLANE_SURF(pipe, plane), + intel_fb_gtt_offset(fb, rotation) + surf_addr); POSTING_READ(PLANE_SURF(pipe, plane)); } @@ -363,13 +340,11 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; u32 sprsurf_offset, linear_offset; unsigned int rotation = dplane->state->rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; @@ -431,7 +406,7 @@ vlv_update_plane(struct drm_plane *dplane, */ sprctl |= SP_GAMMA_ENABLE; - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) sprctl |= SP_TILED; /* Sizes are 0 based */ @@ -440,19 +415,18 @@ vlv_update_plane(struct drm_plane *dplane, crtc_w--; crtc_h--; - linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= sprsurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); if (rotation == DRM_ROTATE_180) { sprctl |= SP_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { 
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); @@ -468,7 +442,7 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); else I915_WRITE(SPLINOFF(pipe, plane), linear_offset); @@ -477,8 +451,8 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + - sprsurf_offset); + I915_WRITE(SPSURF(pipe, plane), + intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -506,12 +480,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; u32 sprsurf_offset, linear_offset; unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; @@ -553,7 +525,7 @@ ivb_update_plane(struct drm_plane *plane, */ sprctl |= SPRITE_GAMMA_ENABLE; - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) sprctl |= SPRITE_TILED; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) @@ -573,10 +545,8 @@ ivb_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= sprsurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); if (rotation == DRM_ROTATE_180) { sprctl |= SPRITE_ROTATE_180; @@ -585,10 +555,11 @@ ivb_update_plane(struct drm_plane *plane, if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { I915_WRITE(SPRKEYVAL(pipe), key->min_value); I915_WRITE(SPRKEYMAX(pipe), key->max_value); @@ -607,7 +578,7 @@ ivb_update_plane(struct drm_plane *plane, * register */ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) I915_WRITE(SPROFFSET(pipe), (y << 16) | x); - else if (i915_gem_object_is_tiled(obj)) + else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); else I915_WRITE(SPRLINOFF(pipe), linear_offset); @@ -617,7 +588,7 @@ ivb_update_plane(struct drm_plane *plane, I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRSURF(pipe), - i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); + intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); } @@ -647,12 +618,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; 
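The sprite hunks above and below replace three open-coded computations of the linear surface offset with intel_fb_xy_to_linear(), called after the tile-offset helpers have already adjusted x and y. For an untiled plane the arithmetic is unchanged from the removed lines:

#include <linux/types.h>

/* byte offset of pixel (x, y) in a linear framebuffer plane, given the
 * plane stride in bytes and cpp bytes per pixel; this is exactly what the
 * removed `y * fb->pitches[0] + x * cpp` expressions computed inline
 */
static u32 fb_xy_to_linear(u32 x, u32 y, u32 stride, u32 cpp)
{
	return y * stride + x * cpp;
}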
u32 dvscntr, dvsscale; u32 dvssurf_offset, linear_offset; unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; @@ -694,7 +663,7 @@ ilk_update_plane(struct drm_plane *plane, */ dvscntr |= DVS_GAMMA_ENABLE; - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dvscntr |= DVS_TILED; if (IS_GEN6(dev)) @@ -710,19 +679,18 @@ ilk_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * cpp; - dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= dvssurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); if (rotation == DRM_ROTATE_180) { dvscntr |= DVS_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { I915_WRITE(DVSKEYVAL(pipe), key->min_value); I915_WRITE(DVSKEYMAX(pipe), key->max_value); @@ -737,7 +705,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - if (i915_gem_object_is_tiled(obj)) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); else I915_WRITE(DVSLINOFF(pipe), linear_offset); @@ -746,7 +714,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSSURF(pipe), - i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); + intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } @@ -785,6 +753,7 @@ intel_check_sprite_plane(struct drm_plane *plane, int hscale, vscale; int max_scale, min_scale; bool can_scale; + int ret; src->x1 = state->base.src_x; src->y1 = state->base.src_y; @@ -949,6 +918,12 @@ intel_check_sprite_plane(struct drm_plane *plane, dst->y1 = crtc_y; dst->y2 = crtc_y + crtc_h; + if (INTEL_GEN(dev) >= 9) { + ret = skl_check_plane_surface(state); + if (ret) + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6190035..8ab9ce5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; @@ -1297,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. 
*/ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); out: if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index d50c967..6a22de0 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -361,8 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, qxl_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, + new_mem); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a89c480..4824f70 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1435,8 +1435,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); @@ -1636,8 +1636,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index cead089a..432cb46 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -389,22 +389,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, + DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - radeon_dp_probe_oui(radeon_connector); + radeon_dp_probe_oui(radeon_connector); - return true; - } + return true; } + dig_connector->dpcd[0] = 0; return false; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0c1b9ff..b1784a1 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -1871,7 +1871,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 running, blackout = 0, tmp; + u32 running, tmp; u32 
*io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1912,11 +1912,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1964,9 +1959,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -8215,7 +8207,7 @@ static void cik_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index cead228..48db935 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2069,6 +2069,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_LMI_EXT40_ADDR 0xf498 #define UVD_GP_SCRATCH4 0xf4e0 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index db275b7..0b6b576 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2878,9 +2878,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -5580,7 +5579,7 @@ static void evergreen_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index c8e3d39..f3d88ca 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1523,6 +1523,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 4a3d7ca..103fc86 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2062,7 +2062,7 @@ static void cayman_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 47eb49b..3c9fec8 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ 
b/drivers/gpu/drm/radeon/nid.h @@ -1137,6 +1137,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9247e7d..6406536 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3097,7 +3097,7 @@ static void r600_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 1e8495c..2e00a52 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1490,6 +1490,7 @@ #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 #define UVD_ENGINE_CNTL 0xef18 +#define UVD_NO_OP 0xeffc #define UVD_SEMA_CNTL 0xf400 #define UVD_RB_ARB_CTRL 0xf480 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5633ee3..1b0dcad 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -742,6 +742,7 @@ struct radeon_flip_work { struct work_struct unpin_work; struct radeon_device *rdev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c3206fb..890171f 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -400,14 +400,13 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_flip_work *work = container_of(__work, struct radeon_flip_work, flip_work); struct radeon_device *rdev = work->rdev; + struct drm_device *dev = rdev->ddev; struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; unsigned long flags; int r; - int vpos, hpos, stat, min_udelay = 0; - unsigned repcnt = 4; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + int vpos, hpos; down_read(&rdev->exclusive_lock); if (work->fence) { @@ -438,59 +437,25 @@ static void radeon_flip_work_func(struct work_struct *__work) work->fence = NULL; } + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip + */ + while (radeon_crtc->enabled && + (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0) + usleep_range(1000, 2000); + /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); /* set the proper interrupt */ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval then it needs - * to delay programming the mmio flip until the real vblank is entered. 
- * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in radeon_get_crtc_scanoutpos() for - * timestamping and radeon_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. - */ - while (radeon_crtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. - */ - stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; - - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); - /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async); @@ -499,10 +464,11 @@ static void radeon_flip_work_func(struct work_struct *__work) up_read(&rdev->exclusive_lock); } -static int radeon_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -599,12 +565,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, base &= ~7; } work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - drm_crtc_vblank_count(crtc) + + dev->driver->get_vblank_counter(dev, work->crtc_id); /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -613,7 +575,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } radeon_crtc->flip_status = RADEON_FLIP_PENDING; radeon_crtc->flip_work = work; @@ -626,9 +588,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, queue_work(radeon_crtc->flip_queue, &work->flip_work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); @@ -697,7 +656,7 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = { .gamma_set = 
radeon_crtc_gamma_set, .set_config = radeon_crtc_set_config, .destroy = radeon_crtc_destroy, - .page_flip = radeon_crtc_page_flip, + .page_flip_target = radeon_crtc_page_flip_target, }; static void radeon_crtc_init(struct drm_device *dev, int index) diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index db64e00..2d46564 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -164,7 +164,6 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg } if (tmp & AUX_SW_RX_TIMEOUT) { - DRM_DEBUG_KMS("dp_aux_ch timed out\n"); ret = -ETIMEDOUT; goto done; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 90f2ff2..07e4493 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -95,9 +95,10 @@ * 2.44.0 - SET_APPEND_CNT packet3 support * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI * 2.46.0 - Add PFP_SYNC_ME support on evergreen + * 2.47.0 - Add UVD_NO_OP register support */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 46 +#define KMS_DRIVER_MINOR 47 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 9590bcd..021aa00 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -938,10 +938,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else if (rec->hw_capable && radeon_hw_i2c && ASIC_IS_DCE3(rdev)) { @@ -950,10 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_atom_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the radeon bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0c00e19..93414ac 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -444,8 +444,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 73dfe01..0cd0e7b 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ 
b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -669,6 +669,7 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, return r; break; case UVD_ENGINE_CNTL: + case UVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", @@ -753,8 +754,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.ptr[3] = addr >> 32; ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0); ib.ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib.ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib.ptr[i] = PACKET0(UVD_NO_OP, 0); + ib.ptr[i+1] = 0; + } ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL, false); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c55d653..76c55c5 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,9 +406,8 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 1c120a4..729ae58 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1738,7 +1738,7 @@ static void rv770_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9ef2064..0271f4c 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -387,6 +387,7 @@ #define UVD_UDEC_TILING_CONFIG 0xef40 #define UVD_UDEC_DB_TILING_CONFIG 0xef44 #define UVD_UDEC_DBW_TILING_CONFIG 0xef48 +#define UVD_NO_OP 0xeffc #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 2523ca9..7ee9aaf 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1547,7 +1547,7 @@ int si_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 running, blackout = 0; + u32 running; u32 *io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1598,11 +1598,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1641,9 +1636,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -6928,7 +6920,7 @@ static void si_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git 
a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index d1a7b58..eb220ee 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1559,6 +1559,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 05d0713..9746365 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -3,7 +3,7 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ - rockchip_drm_gem.o rockchip_drm_vop.o + rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 0e63ee2..e83be15 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -32,6 +32,7 @@ #include <drm/bridge/analogix_dp.h> #include "rockchip_drm_drv.h" +#include "rockchip_drm_psr.h" #include "rockchip_drm_vop.h" #define RK3288_GRF_SOC_CON6 0x25c @@ -41,6 +42,8 @@ #define HIWORD_UPDATE(val, mask) (val | (mask) << 16) +#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100 + #define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm) /** @@ -68,11 +71,62 @@ struct rockchip_dp_device { struct regmap *grf; struct reset_control *rst; + struct work_struct psr_work; + spinlock_t psr_lock; + unsigned int psr_state; + const struct rockchip_dp_chip_data *data; struct analogix_dp_plat_data plat_data; }; +static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled) +{ + struct rockchip_dp_device *dp = to_dp(encoder); + unsigned long flags; + + dev_dbg(dp->dev, "%s PSR...\n", enabled ? 
"Entry" : "Exit"); + + spin_lock_irqsave(&dp->psr_lock, flags); + if (enabled) + dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE; + else + dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; + + schedule_work(&dp->psr_work); + spin_unlock_irqrestore(&dp->psr_lock, flags); +} + +static void analogix_dp_psr_work(struct work_struct *work) +{ + struct rockchip_dp_device *dp = + container_of(work, typeof(*dp), psr_work); + struct drm_crtc *crtc = dp->encoder.crtc; + int psr_state = dp->psr_state; + int vact_end; + int ret; + unsigned long flags; + + if (!crtc) + return; + + vact_end = crtc->mode.vtotal - crtc->mode.vsync_start + crtc->mode.vdisplay; + + ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end, + PSR_WAIT_LINE_FLAG_TIMEOUT_MS); + if (ret) { + dev_err(dp->dev, "line flag interrupt did not arrive\n"); + return; + } + + spin_lock_irqsave(&dp->psr_lock, flags); + if (psr_state == EDP_VSC_PSR_STATE_ACTIVE) + analogix_dp_enable_psr(dp->dev); + else + analogix_dp_disable_psr(dp->dev); + spin_unlock_irqrestore(&dp->psr_lock, flags); +} + static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) { reset_control_assert(dp->rst); @@ -87,6 +141,8 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data) struct rockchip_dp_device *dp = to_dp(plat_data); int ret; + cancel_work_sync(&dp->psr_work); + ret = clk_prepare_enable(dp->pclk); if (ret < 0) { dev_err(dp->dev, "failed to enable pclk %d\n", ret); @@ -342,12 +398,22 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.power_off = rockchip_dp_powerdown; dp->plat_data.get_modes = rockchip_dp_get_modes; + spin_lock_init(&dp->psr_lock); + dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; + INIT_WORK(&dp->psr_work, analogix_dp_psr_work); + + rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set); + return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); } static void rockchip_dp_unbind(struct device *dev, struct device *master, void *data) { + struct rockchip_dp_device *dp = dev_get_drvdata(dev); + + rockchip_drm_psr_unregister(&dp->encoder); + return analogix_dp_unbind(dev, master, data); } @@ -381,10 +447,8 @@ static int rockchip_dp_probe(struct platform_device *pdev) panel = of_drm_find_panel(panel_node); of_node_put(panel_node); - if (!panel) { - DRM_ERROR("failed to find panel\n"); + if (!panel) return -EPROBE_DEFER; - } } dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a822d49..76eaf1d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -156,6 +156,9 @@ static int rockchip_drm_bind(struct device *dev) drm_dev->dev_private = private; + INIT_LIST_HEAD(&private->psr_list); + spin_lock_init(&private->psr_list_lock); + drm_mode_config_init(drm_dev); rockchip_drm_mode_config_init(drm_dev); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index ea39329..5c69845 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -61,6 +61,9 @@ struct rockchip_drm_private { struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; struct drm_atomic_state *state; + + struct list_head psr_list; + spinlock_t psr_list_lock; }; int rockchip_register_crtc_funcs(struct drm_crtc *crtc, @@ -70,4 +73,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, struct device *dev); void 
rockchip_drm_dma_detach_device(struct drm_device *drm_dev, struct device *dev); +int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num, + unsigned int mstimeout); + #endif /* _ROCKCHIP_DRM_DRV_H_ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 7ca8f34..60bcc48 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -22,6 +22,7 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" #include "rockchip_drm_gem.h" +#include "rockchip_drm_psr.h" #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) @@ -63,9 +64,20 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb, rockchip_fb->obj[0], handle); } +static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + rockchip_drm_psr_flush(fb->dev); + return 0; +} + static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { .destroy = rockchip_drm_fb_destroy, .create_handle = rockchip_drm_fb_create_handle, + .dirty = rockchip_drm_fb_dirty, }; static struct rockchip_drm_fb * diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c new file mode 100644 index 0000000..c6ac5d0 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c @@ -0,0 +1,245 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Yakir Yang <ykk@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_psr.h" + +#define PSR_FLUSH_TIMEOUT msecs_to_jiffies(3000) /* 3 seconds */ + +enum psr_state { + PSR_FLUSH, + PSR_ENABLE, + PSR_DISABLE, +}; + +struct psr_drv { + struct list_head list; + struct drm_encoder *encoder; + + spinlock_t lock; + enum psr_state state; + + struct timer_list flush_timer; + + void (*set)(struct drm_encoder *encoder, bool enable); +}; + +static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc) +{ + struct rockchip_drm_private *drm_drv = crtc->dev->dev_private; + struct psr_drv *psr; + unsigned long flags; + + spin_lock_irqsave(&drm_drv->psr_list_lock, flags); + list_for_each_entry(psr, &drm_drv->psr_list, list) { + if (psr->encoder->crtc == crtc) + goto out; + } + psr = ERR_PTR(-ENODEV); + +out: + spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags); + return psr; +} + +static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state) +{ + /* + * Allowed finite state machine: + * + * PSR_ENABLE < = = = = = > PSR_FLUSH + * | ^ | + * | | | + * v | | + * PSR_DISABLE < - - - - - - - - - + */ + if (state == psr->state) + return; + + /* Requesting a flush when disabled is a noop */ + if (state == PSR_FLUSH && psr->state == PSR_DISABLE) + return; + + /* Already disabled by a flush: record the new state, but leave the + * hardware alone (the flush already switched it off) + */ + if (state == PSR_DISABLE && psr->state == PSR_FLUSH) { + psr->state = state; + return; + } + + psr->state = state; + + /* Actually commit the state change to hardware */ + switch (psr->state) { + case PSR_ENABLE: + psr->set(psr->encoder, true); + break; + + case PSR_DISABLE: + case PSR_FLUSH: + psr->set(psr->encoder, false); + break; + } +} + +static void psr_set_state(struct psr_drv *psr, enum psr_state state) +{ + unsigned long flags; + + spin_lock_irqsave(&psr->lock, flags); + psr_set_state_locked(psr, state); + spin_unlock_irqrestore(&psr->lock, flags); +} + +static void psr_flush_handler(unsigned long data) +{ + struct psr_drv *psr = (struct psr_drv *)data; + unsigned long flags; + + /* If the state has changed since we initiated the flush, do nothing */ + spin_lock_irqsave(&psr->lock, flags); + if (psr->state == PSR_FLUSH) + psr_set_state_locked(psr, PSR_ENABLE); + spin_unlock_irqrestore(&psr->lock, flags); +} + +/** + * rockchip_drm_psr_enable - enable PSR on the encoder bound to the given CRTC + * @crtc: CRTC to obtain the PSR encoder + * + * Returns: + * Zero on success, negative errno on failure. + */ +int rockchip_drm_psr_enable(struct drm_crtc *crtc) +{ + struct psr_drv *psr = find_psr_by_crtc(crtc); + + if (IS_ERR(psr)) + return PTR_ERR(psr); + + psr_set_state(psr, PSR_ENABLE); + return 0; +} +EXPORT_SYMBOL(rockchip_drm_psr_enable); + +/** + * rockchip_drm_psr_disable - disable PSR on the encoder bound to the given CRTC + * @crtc: CRTC to obtain the PSR encoder + * + * Returns: + * Zero on success, negative errno on failure. + */ +int rockchip_drm_psr_disable(struct drm_crtc *crtc) +{ + struct psr_drv *psr = find_psr_by_crtc(crtc); + + if (IS_ERR(psr)) + return PTR_ERR(psr); + + psr_set_state(psr, PSR_DISABLE); + return 0; +} +EXPORT_SYMBOL(rockchip_drm_psr_disable);
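The transition rules in psr_set_state_locked() above can be exercised outside the kernel. Below is a standalone user-space model of the same finite state machine; a sketch only, under the assumption that it mirrors the intended transitions from the diagram (the previous state is captured before it is overwritten), and with illustrative names such as psr_request that do not come from the driver:

#include <stdio.h>

enum psr_state { PSR_FLUSH, PSR_ENABLE, PSR_DISABLE };

struct psr_model {
	enum psr_state state;
	int hw_on;	/* what set(encoder, enable) last programmed */
};

static void psr_request(struct psr_model *p, enum psr_state state)
{
	enum psr_state old = p->state;

	if (state == old)
		return;
	/* Requesting a flush while disabled is a no-op */
	if (state == PSR_FLUSH && old == PSR_DISABLE)
		return;

	p->state = state;

	/* Hardware was already switched off by the flush; record state only */
	if (state == PSR_DISABLE && old == PSR_FLUSH)
		return;

	p->hw_on = (state == PSR_ENABLE);
}

int main(void)
{
	struct psr_model m = { PSR_DISABLE, 0 };

	psr_request(&m, PSR_FLUSH);	/* ignored: flush while disabled */
	psr_request(&m, PSR_ENABLE);	/* hw_on = 1 */
	psr_request(&m, PSR_FLUSH);	/* hw_on = 0, state = FLUSH */
	psr_request(&m, PSR_DISABLE);	/* state changes, hw stays off */
	printf("state=%d hw_on=%d\n", m.state, m.hw_on); /* state=2 hw_on=0 */
	return 0;
}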
+ +/** + * rockchip_drm_psr_flush - force a flush of all registered PSR encoders + * @dev: drm device + * + * Disable the PSR function for all registered encoders, then re-enable it + * once PSR_FLUSH_TIMEOUT has elapsed. If an encoder's PSR state changes + * while the flush is pending, the newer state is kept once the flush + * timeout expires. + */ +void rockchip_drm_psr_flush(struct drm_device *dev) +{ + struct rockchip_drm_private *drm_drv = dev->dev_private; + struct psr_drv *psr; + unsigned long flags; + + spin_lock_irqsave(&drm_drv->psr_list_lock, flags); + list_for_each_entry(psr, &drm_drv->psr_list, list) { + mod_timer(&psr->flush_timer, + round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT)); + + psr_set_state(psr, PSR_FLUSH); + } + spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags); +} +EXPORT_SYMBOL(rockchip_drm_psr_flush); + +/** + * rockchip_drm_psr_register - register an encoder with the PSR driver + * @encoder: encoder that supports the PSR function + * @psr_set: callback to set the PSR state + * + * Returns: + * Zero on success, negative errno on failure. + */ +int rockchip_drm_psr_register(struct drm_encoder *encoder, + void (*psr_set)(struct drm_encoder *, bool enable)) +{ + struct rockchip_drm_private *drm_drv; + struct psr_drv *psr; + unsigned long flags; + + if (!encoder || !psr_set) + return -EINVAL; + + drm_drv = encoder->dev->dev_private; + + psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL); + if (!psr) + return -ENOMEM; + + setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr); + spin_lock_init(&psr->lock); + + psr->state = PSR_DISABLE; + psr->encoder = encoder; + psr->set = psr_set; + + spin_lock_irqsave(&drm_drv->psr_list_lock, flags); + list_add_tail(&psr->list, &drm_drv->psr_list); + spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags); + + return 0; +} +EXPORT_SYMBOL(rockchip_drm_psr_register); + +/** + * rockchip_drm_psr_unregister - unregister an encoder from the PSR driver + * @encoder: encoder that supports the PSR function + */ +void rockchip_drm_psr_unregister(struct drm_encoder *encoder) +{ + struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; + struct psr_drv *psr, *n; + unsigned long flags; + + spin_lock_irqsave(&drm_drv->psr_list_lock, flags); + list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) { + if (psr->encoder == encoder) { + del_timer(&psr->flush_timer); + list_del(&psr->list); + kfree(psr); + } + } + spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags); +} +EXPORT_SYMBOL(rockchip_drm_psr_unregister); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h new file mode 100644 index 0000000..c35b688 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Yakir Yang <ykk@rock-chips.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef __ROCKCHIP_DRM_PSR___ +#define __ROCKCHIP_DRM_PSR___ + +void rockchip_drm_psr_flush(struct drm_device *dev); +int rockchip_drm_psr_enable(struct drm_crtc *crtc); +int rockchip_drm_psr_disable(struct drm_crtc *crtc); + +int rockchip_drm_psr_register(struct drm_encoder *encoder, + void (*psr_set)(struct drm_encoder *, bool enable)); +void rockchip_drm_psr_unregister(struct drm_encoder *encoder); + +#endif /* __ROCKCHIP_DRM_PSR__ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index efbc41a..d486049 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -34,17 +34,21 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" #include "rockchip_drm_fb.h" +#include "rockchip_drm_psr.h" #include "rockchip_drm_vop.h" -#define __REG_SET_RELAXED(x, off, mask, shift, v) \ - vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift) -#define __REG_SET_NORMAL(x, off, mask, shift, v) \ - vop_mask_write(x, off, (mask) << shift, (v) << shift) +#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \ + vop_mask_write(x, off, mask, shift, v, write_mask, true) + +#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \ + vop_mask_write(x, off, mask, shift, v, write_mask, false) #define REG_SET(x, base, reg, v, mode) \ - __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) + __REG_SET_##mode(x, base + reg.offset, \ + reg.mask, reg.shift, v, reg.write_mask) #define REG_SET_MASK(x, base, reg, mask, v, mode) \ - __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v) + __REG_SET_##mode(x, base + reg.offset, \ + mask, reg.shift, v, reg.write_mask) #define VOP_WIN_SET(x, win, name, v) \ REG_SET(x, win->base, win->phy->name, v, RELAXED) @@ -106,6 +110,7 @@ struct vop { struct device *dev; struct drm_device *drm_dev; bool is_enabled; + bool vblank_active; /* mutex vsync_ work */ struct mutex vsync_mutex; @@ -116,6 +121,8 @@ struct vop { /* protected by dev->event_lock */ struct drm_pending_vblank_event *event; + struct completion line_flag_completion; + const struct vop_data *data; uint32_t *regsbak; @@ -162,27 +169,25 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, } static inline void vop_mask_write(struct vop *vop, uint32_t offset, - uint32_t mask, uint32_t v) + uint32_t mask, uint32_t shift, uint32_t v, + bool write_mask, bool relaxed) { - if (mask) { - uint32_t cached_val = vop->regsbak[offset >> 2]; - - cached_val = (cached_val & ~mask) | v; - writel(cached_val, vop->regs + offset); - vop->regsbak[offset >> 2] = cached_val; - } -} + if (!mask) + return; -static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, - uint32_t mask, uint32_t v) -{ - if (mask) { + if (write_mask) { + v = ((v << shift) & 0xffff) | (mask << (shift + 16)); + } else { uint32_t cached_val = vop->regsbak[offset >> 2]; - cached_val = (cached_val & ~mask) | v; - writel_relaxed(cached_val, vop->regs + offset); - vop->regsbak[offset >> 2] = cached_val; + v = (cached_val & ~(mask << shift)) | ((v & mask) << shift); + vop->regsbak[offset >> 2] = v; } + + if (relaxed) + writel_relaxed(v, vop->regs + offset); + else + writel(v, vop->regs + offset); } static inline uint32_t vop_get_intr_type(struct vop *vop, @@ -428,6 +433,71 @@ static void vop_dsp_hold_valid_irq_disable(struct vop *vop) spin_unlock_irqrestore(&vop->irq_lock, flags); } +/* + * (1) each frame starts at the start of the Vsync pulse which is signaled by + * the "FRAME_SYNC" 
interrupt. + * (2) the active data region of each frame ends at dsp_vact_end + * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num, + * to get "LINE_FLAG" interrupt at the end of the active on screen data. + * + * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end + * Interrupts + * LINE_FLAG -------------------------------+ + * FRAME_SYNC ----+ | + * | | + * v v + * | Vsync | Vbp | Vactive | Vfp | + * ^ ^ ^ ^ + * | | | | + * | | | | + * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END + * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END + * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END + * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END + */ +static bool vop_line_flag_irq_is_enabled(struct vop *vop) +{ + uint32_t line_flag_irq; + unsigned long flags; + + spin_lock_irqsave(&vop->irq_lock, flags); + + line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR); + + spin_unlock_irqrestore(&vop->irq_lock, flags); + + return !!line_flag_irq; +} + +static void vop_line_flag_irq_enable(struct vop *vop, int line_num) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + VOP_CTRL_SET(vop, line_flag_num[0], line_num); + VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + +static void vop_line_flag_irq_disable(struct vop *vop) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); @@ -849,6 +919,8 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&vop->irq_lock, flags); + rockchip_drm_psr_disable(&vop->crtc); + return 0; } @@ -865,6 +937,8 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc) VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0); spin_unlock_irqrestore(&vop->irq_lock, flags); + + rockchip_drm_psr_enable(&vop->crtc); } static void vop_crtc_wait_for_update(struct drm_crtc *crtc) @@ -908,7 +982,7 @@ static void vop_crtc_enable(struct drm_crtc *crtc) u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; u16 vact_end = vact_st + vdisplay; - uint32_t val; + uint32_t pin_pol, val; int ret; WARN_ON(vop->event); @@ -955,21 +1029,26 @@ static void vop_crtc_enable(struct drm_crtc *crtc) vop_dsp_hold_valid_irq_disable(vop); } - val = 0x8; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1); - VOP_CTRL_SET(vop, pin_pol, val); + pin_pol = 0x8; + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
0 : (1 << 1); + VOP_CTRL_SET(vop, pin_pol, pin_pol); + switch (s->output_type) { case DRM_MODE_CONNECTOR_LVDS: VOP_CTRL_SET(vop, rgb_en, 1); + VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol); break; case DRM_MODE_CONNECTOR_eDP: + VOP_CTRL_SET(vop, edp_pin_pol, pin_pol); VOP_CTRL_SET(vop, edp_en, 1); break; case DRM_MODE_CONNECTOR_HDMIA: + VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol); VOP_CTRL_SET(vop, hdmi_en, 1); break; case DRM_MODE_CONNECTOR_DSI: + VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol); VOP_CTRL_SET(vop, mipi_en, 1); break; default: @@ -1016,10 +1095,11 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, struct vop *vop = to_vop(crtc); spin_lock_irq(&crtc->dev->event_lock); - if (crtc->state->event) { - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - WARN_ON(vop->event); + vop->vblank_active = true; + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + WARN_ON(vop->event); + if (crtc->state->event) { vop->event = crtc->state->event; crtc->state->event = NULL; } @@ -1106,12 +1186,14 @@ static void vop_handle_vblank(struct vop *vop) spin_lock_irqsave(&drm->event_lock, flags); if (vop->event) { - drm_crtc_send_vblank_event(crtc, vop->event); - drm_crtc_vblank_put(crtc); vop->event = NULL; } + if (vop->vblank_active) { + vop->vblank_active = false; + drm_crtc_vblank_put(crtc); + } spin_unlock_irqrestore(&drm->event_lock, flags); if (!completion_done(&vop->wait_update_complete)) @@ -1149,6 +1231,12 @@ static irqreturn_t vop_isr(int irq, void *data) ret = IRQ_HANDLED; } + if (active_irqs & LINE_FLAG_INTR) { + complete(&vop->line_flag_completion); + active_irqs &= ~LINE_FLAG_INTR; + ret = IRQ_HANDLED; + } + if (active_irqs & FS_INTR) { drm_crtc_handle_vblank(crtc); vop_handle_vblank(vop); @@ -1250,6 +1338,7 @@ static int vop_create_crtc(struct vop *vop) init_completion(&vop->dsp_hold_completion); init_completion(&vop->wait_update_complete); + init_completion(&vop->line_flag_completion); crtc->port = port; rockchip_register_crtc_funcs(crtc, &private_crtc_funcs); @@ -1377,6 +1466,7 @@ static int vop_initial(struct vop *vop) clk_disable(vop->aclk); vop->is_enabled = false; + vop->vblank_active = false; return 0; @@ -1406,6 +1496,49 @@ static void vop_win_init(struct vop *vop) } }
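The helper documented and defined below is what the eDP PSR work item shown earlier drives: it waits for the line flag at the end of the active region before toggling PSR. A standalone sketch of just that line arithmetic, assuming CEA 1080p60 timings for the worked numbers (vtotal - vsync_start is the vsync-plus-back-porch length, so adding vdisplay gives the hardware line where active video ends, matching the vact_end computation in analogix_dp_psr_work()):

#include <stdio.h>

/* Standalone check of the line number a PSR caller would pass to the
 * wait-line-flag helper: vact_end = vtotal - vsync_start + vdisplay,
 * counted from the start of vsync. */
int main(void)
{
	int vdisplay = 1080, vsync_start = 1084, vtotal = 1125; /* CEA 1080p60 */
	int vact_end = vtotal - vsync_start + vdisplay;

	printf("active video ends at hw line %d\n", vact_end); /* 1121 */
	return 0;
}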
+/** + * rockchip_drm_wait_line_flag - wait for the given line flag event + * @crtc: CRTC to enable line flag + * @line_num: line number of interest + * @mstimeout: timeout in milliseconds + * + * The driver blocks here until the line flag interrupt for the given line + * has fired, or until the timeout expires. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num, + unsigned int mstimeout) +{ + struct vop *vop = to_vop(crtc); + unsigned long jiffies_left; + + if (!crtc || !vop->is_enabled) + return -ENODEV; + + if (line_num > crtc->mode.vtotal || mstimeout <= 0) + return -EINVAL; + + if (vop_line_flag_irq_is_enabled(vop)) + return -EBUSY; + + reinit_completion(&vop->line_flag_completion); + vop_line_flag_irq_enable(vop, line_num); + + jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion, + msecs_to_jiffies(mstimeout)); + vop_line_flag_irq_disable(vop); + + if (jiffies_left == 0) { + dev_err(vop->dev, "Timeout waiting for IRQ\n"); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL(rockchip_drm_wait_line_flag); + static int vop_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -1474,6 +1607,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) return ret; pm_runtime_enable(&pdev->dev); + return 0; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 071ff0b..1dbc526 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -33,6 +33,7 @@ struct vop_reg { uint32_t offset; uint32_t shift; uint32_t mask; + bool write_mask; }; struct vop_ctrl { @@ -48,6 +49,10 @@ struct vop_ctrl { struct vop_reg dither_down; struct vop_reg dither_up; struct vop_reg pin_pol; + struct vop_reg rgb_pin_pol; + struct vop_reg hdmi_pin_pol; + struct vop_reg edp_pin_pol; + struct vop_reg mipi_pin_pol; struct vop_reg htotal_pw; struct vop_reg hact_st_end; @@ -56,6 +61,8 @@ struct vop_ctrl { struct vop_reg hpost_st_end; struct vop_reg vpost_st_end; + struct vop_reg line_flag_num[2]; + struct vop_reg cfg_done; }; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index b7f59c4..35c51f3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -23,7 +23,14 @@ #define VOP_REG(off, _mask, s) \ {.offset = off, \ .mask = _mask, \ - .shift = s,} + .shift = s, \ + .write_mask = false,} + +#define VOP_REG_MASK(off, _mask, s) \ + {.offset = off, \ + .mask = _mask, \ + .shift = s, \ + .write_mask = true,} static const uint32_t formats_win_full[] = { DRM_FORMAT_XRGB8888, @@ -50,6 +57,89 @@ static const uint32_t formats_win_lite[] = { DRM_FORMAT_BGR565, }; +static const struct vop_scl_regs rk3036_win_scl = { + .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), + .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), + .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), + .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16), +}; + +static const struct vop_win_phy rk3036_win0_data = { + .scl = &rk3036_win_scl, + .data_formats = formats_win_full, + .nformats = ARRAY_SIZE(formats_win_full), + .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), + .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), + .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), + .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), + .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16), +}; + +static const
struct vop_win_phy rk3036_win1_data = { + .data_formats = formats_win_lite, + .nformats = ARRAY_SIZE(formats_win_lite), + .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), + .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), + .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), + .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0), +}; + +static const struct vop_win_data rk3036_vop_win_data[] = { + { .base = 0x00, .phy = &rk3036_win0_data, + .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x00, .phy = &rk3036_win1_data, + .type = DRM_PLANE_TYPE_CURSOR }, +}; + +static const int rk3036_vop_intrs[] = { + DSP_HOLD_VALID_INTR, + FS_INTR, + LINE_FLAG_INTR, + BUS_ERROR_INTR, +}; + +static const struct vop_intr rk3036_intr = { + .intrs = rk3036_vop_intrs, + .nintrs = ARRAY_SIZE(rk3036_vop_intrs), + .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0), + .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4), + .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8), +}; + +static const struct vop_ctrl rk3036_ctrl_data = { + .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30), + .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0), + .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4), + .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), + .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0), + .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), + .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12), + .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0), +}; + +static const struct vop_reg_data rk3036_vop_init_reg_table[] = { + {RK3036_DSP_CTRL1, 0x00000000}, +}; + +static const struct vop_data rk3036_vop = { + .init_table = rk3036_vop_init_reg_table, + .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table), + .ctrl = &rk3036_ctrl_data, + .intr = &rk3036_intr, + .win = rk3036_vop_win_data, + .win_size = ARRAY_SIZE(rk3036_vop_win_data), +}; + static const struct vop_scl_extension rk3288_win_full_scl_ext = { .cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31), .cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30), @@ -133,6 +223,7 @@ static const struct vop_ctrl rk3288_ctrl_data = { .vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0), .hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0), .vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12), .cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0), }; @@ -190,93 +281,104 @@ static const struct vop_data rk3288_vop = { .win_size = ARRAY_SIZE(rk3288_vop_win_data), }; -static const struct vop_scl_regs rk3036_win_scl = { - .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), - .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), - .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), - .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16), -}; - -static const struct vop_win_phy rk3036_win0_data = { - .scl = &rk3036_win_scl, - .data_formats = formats_win_full, - .nformats = ARRAY_SIZE(formats_win_full), - .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), - .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), - .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), - .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0), - .dsp_st = 
VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), - .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), - .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16), +static const struct vop_ctrl rk3399_ctrl_data = { + .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22), + .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), + .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), + .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), + .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), + .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15), + .dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1), + .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6), + .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), + .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), + .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), + .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20), + .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24), + .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28), + .htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), + .hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0), + .vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), + .vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0), + .hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0), + .vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0), + .line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16), + .cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0), }; -static const struct vop_win_phy rk3036_win1_data = { - .data_formats = formats_win_lite, - .nformats = ARRAY_SIZE(formats_win_lite), - .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), - .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), - .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), - .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0), - .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0), -}; - -static const struct vop_win_data rk3036_vop_win_data[] = { - { .base = 0x00, .phy = &rk3036_win0_data, - .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x00, .phy = &rk3036_win1_data, - .type = DRM_PLANE_TYPE_CURSOR }, -}; - -static const int rk3036_vop_intrs[] = { - DSP_HOLD_VALID_INTR, +static const int rk3399_vop_intrs[] = { FS_INTR, + 0, 0, LINE_FLAG_INTR, + 0, BUS_ERROR_INTR, + 0, 0, 0, 0, 0, 0, 0, + DSP_HOLD_VALID_INTR, }; -static const struct vop_intr rk3036_intr = { - .intrs = rk3036_vop_intrs, - .nintrs = ARRAY_SIZE(rk3036_vop_intrs), - .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0), - .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4), - .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8), +static const struct vop_intr rk3399_vop_intr = { + .intrs = rk3399_vop_intrs, + .nintrs = ARRAY_SIZE(rk3399_vop_intrs), + .status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0), + .enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0), + .clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0), }; -static const struct vop_ctrl rk3036_ctrl_data = { - .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30), - .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0), - .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4), - .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), - .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0), - .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), - .vact_st_end = 
VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0), - .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0), +static const struct vop_reg_data rk3399_init_reg_table[] = { + {RK3399_SYS_CTRL, 0x2000f800}, + {RK3399_DSP_CTRL0, 0x00000000}, + {RK3399_WIN0_CTRL0, 0x00000080}, + {RK3399_WIN1_CTRL0, 0x00000080}, + /* TODO: Win2/3 support multiple area function, but we haven't found + * a suitable way to use it yet, so let's just use them as other windows + * with only area 0 enabled. + */ + {RK3399_WIN2_CTRL0, 0x00000010}, + {RK3399_WIN3_CTRL0, 0x00000010}, }; -static const struct vop_reg_data rk3036_vop_init_reg_table[] = { - {RK3036_DSP_CTRL1, 0x00000000}, +static const struct vop_data rk3399_vop_big = { + .init_table = rk3399_init_reg_table, + .table_size = ARRAY_SIZE(rk3399_init_reg_table), + .intr = &rk3399_vop_intr, + .ctrl = &rk3399_ctrl_data, + /* + * rk3399 vop big windows register layout is same as rk3288. + */ + .win = rk3288_vop_win_data, + .win_size = ARRAY_SIZE(rk3288_vop_win_data), }; -static const struct vop_data rk3036_vop = { - .init_table = rk3036_vop_init_reg_table, - .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table), - .ctrl = &rk3036_ctrl_data, - .intr = &rk3036_intr, - .win = rk3036_vop_win_data, - .win_size = ARRAY_SIZE(rk3036_vop_win_data), +static const struct vop_win_data rk3399_vop_lit_win_data[] = { + { .base = 0x00, .phy = &rk3288_win01_data, + .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x00, .phy = &rk3288_win23_data, + .type = DRM_PLANE_TYPE_CURSOR}, +}; + +static const struct vop_data rk3399_vop_lit = { + .init_table = rk3399_init_reg_table, + .table_size = ARRAY_SIZE(rk3399_init_reg_table), + .intr = &rk3399_vop_intr, + .ctrl = &rk3399_ctrl_data, + /* + * rk3399 vop lit windows register layout is same as rk3288, + * but cut off the win1 and win3 windows. 
+ */ + .win = rk3399_vop_lit_win_data, + .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data), }; static const struct of_device_id vop_driver_dt_match[] = { - { .compatible = "rockchip,rk3288-vop", - .data = &rk3288_vop }, { .compatible = "rockchip,rk3036-vop", .data = &rk3036_vop }, + { .compatible = "rockchip,rk3288-vop", + .data = &rk3288_vop }, + { .compatible = "rockchip,rk3399-vop-big", + .data = &rk3399_vop_big }, + { .compatible = "rockchip,rk3399-vop-lit", + .data = &rk3399_vop_lit }, {}, }; MODULE_DEVICE_TABLE(of, vop_driver_dt_match); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h index d4b46cb..cd19726 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h @@ -166,4 +166,197 @@ #define RK3036_HWC_LUT_ADDR 0x800 /* rk3036 register definition end */ +/* rk3399 register definition */ +#define RK3399_REG_CFG_DONE 0x00000 +#define RK3399_VERSION_INFO 0x00004 +#define RK3399_SYS_CTRL 0x00008 +#define RK3399_SYS_CTRL1 0x0000c +#define RK3399_DSP_CTRL0 0x00010 +#define RK3399_DSP_CTRL1 0x00014 +#define RK3399_DSP_BG 0x00018 +#define RK3399_MCU_CTRL 0x0001c +#define RK3399_WB_CTRL0 0x00020 +#define RK3399_WB_CTRL1 0x00024 +#define RK3399_WB_YRGB_MST 0x00028 +#define RK3399_WB_CBR_MST 0x0002c +#define RK3399_WIN0_CTRL0 0x00030 +#define RK3399_WIN0_CTRL1 0x00034 +#define RK3399_WIN0_COLOR_KEY 0x00038 +#define RK3399_WIN0_VIR 0x0003c +#define RK3399_WIN0_YRGB_MST 0x00040 +#define RK3399_WIN0_CBR_MST 0x00044 +#define RK3399_WIN0_ACT_INFO 0x00048 +#define RK3399_WIN0_DSP_INFO 0x0004c +#define RK3399_WIN0_DSP_ST 0x00050 +#define RK3399_WIN0_SCL_FACTOR_YRGB 0x00054 +#define RK3399_WIN0_SCL_FACTOR_CBR 0x00058 +#define RK3399_WIN0_SCL_OFFSET 0x0005c +#define RK3399_WIN0_SRC_ALPHA_CTRL 0x00060 +#define RK3399_WIN0_DST_ALPHA_CTRL 0x00064 +#define RK3399_WIN0_FADING_CTRL 0x00068 +#define RK3399_WIN0_CTRL2 0x0006c +#define RK3399_WIN1_CTRL0 0x00070 +#define RK3399_WIN1_CTRL1 0x00074 +#define RK3399_WIN1_COLOR_KEY 0x00078 +#define RK3399_WIN1_VIR 0x0007c +#define RK3399_WIN1_YRGB_MST 0x00080 +#define RK3399_WIN1_CBR_MST 0x00084 +#define RK3399_WIN1_ACT_INFO 0x00088 +#define RK3399_WIN1_DSP_INFO 0x0008c +#define RK3399_WIN1_DSP_ST 0x00090 +#define RK3399_WIN1_SCL_FACTOR_YRGB 0x00094 +#define RK3399_WIN1_SCL_FACTOR_CBR 0x00098 +#define RK3399_WIN1_SCL_OFFSET 0x0009c +#define RK3399_WIN1_SRC_ALPHA_CTRL 0x000a0 +#define RK3399_WIN1_DST_ALPHA_CTRL 0x000a4 +#define RK3399_WIN1_FADING_CTRL 0x000a8 +#define RK3399_WIN1_CTRL2 0x000ac +#define RK3399_WIN2_CTRL0 0x000b0 +#define RK3399_WIN2_CTRL1 0x000b4 +#define RK3399_WIN2_VIR0_1 0x000b8 +#define RK3399_WIN2_VIR2_3 0x000bc +#define RK3399_WIN2_MST0 0x000c0 +#define RK3399_WIN2_DSP_INFO0 0x000c4 +#define RK3399_WIN2_DSP_ST0 0x000c8 +#define RK3399_WIN2_COLOR_KEY 0x000cc +#define RK3399_WIN2_MST1 0x000d0 +#define RK3399_WIN2_DSP_INFO1 0x000d4 +#define RK3399_WIN2_DSP_ST1 0x000d8 +#define RK3399_WIN2_SRC_ALPHA_CTRL 0x000dc +#define RK3399_WIN2_MST2 0x000e0 +#define RK3399_WIN2_DSP_INFO2 0x000e4 +#define RK3399_WIN2_DSP_ST2 0x000e8 +#define RK3399_WIN2_DST_ALPHA_CTRL 0x000ec +#define RK3399_WIN2_MST3 0x000f0 +#define RK3399_WIN2_DSP_INFO3 0x000f4 +#define RK3399_WIN2_DSP_ST3 0x000f8 +#define RK3399_WIN2_FADING_CTRL 0x000fc +#define RK3399_WIN3_CTRL0 0x00100 +#define RK3399_WIN3_CTRL1 0x00104 +#define RK3399_WIN3_VIR0_1 0x00108 +#define RK3399_WIN3_VIR2_3 0x0010c +#define RK3399_WIN3_MST0 0x00110 +#define RK3399_WIN3_DSP_INFO0 0x00114 +#define RK3399_WIN3_DSP_ST0 
0x00118 +#define RK3399_WIN3_COLOR_KEY 0x0011c +#define RK3399_WIN3_MST1 0x00120 +#define RK3399_WIN3_DSP_INFO1 0x00124 +#define RK3399_WIN3_DSP_ST1 0x00128 +#define RK3399_WIN3_SRC_ALPHA_CTRL 0x0012c +#define RK3399_WIN3_MST2 0x00130 +#define RK3399_WIN3_DSP_INFO2 0x00134 +#define RK3399_WIN3_DSP_ST2 0x00138 +#define RK3399_WIN3_DST_ALPHA_CTRL 0x0013c +#define RK3399_WIN3_MST3 0x00140 +#define RK3399_WIN3_DSP_INFO3 0x00144 +#define RK3399_WIN3_DSP_ST3 0x00148 +#define RK3399_WIN3_FADING_CTRL 0x0014c +#define RK3399_HWC_CTRL0 0x00150 +#define RK3399_HWC_CTRL1 0x00154 +#define RK3399_HWC_MST 0x00158 +#define RK3399_HWC_DSP_ST 0x0015c +#define RK3399_HWC_SRC_ALPHA_CTRL 0x00160 +#define RK3399_HWC_DST_ALPHA_CTRL 0x00164 +#define RK3399_HWC_FADING_CTRL 0x00168 +#define RK3399_HWC_RESERVED1 0x0016c +#define RK3399_POST_DSP_HACT_INFO 0x00170 +#define RK3399_POST_DSP_VACT_INFO 0x00174 +#define RK3399_POST_SCL_FACTOR_YRGB 0x00178 +#define RK3399_POST_RESERVED 0x0017c +#define RK3399_POST_SCL_CTRL 0x00180 +#define RK3399_POST_DSP_VACT_INFO_F1 0x00184 +#define RK3399_DSP_HTOTAL_HS_END 0x00188 +#define RK3399_DSP_HACT_ST_END 0x0018c +#define RK3399_DSP_VTOTAL_VS_END 0x00190 +#define RK3399_DSP_VACT_ST_END 0x00194 +#define RK3399_DSP_VS_ST_END_F1 0x00198 +#define RK3399_DSP_VACT_ST_END_F1 0x0019c +#define RK3399_PWM_CTRL 0x001a0 +#define RK3399_PWM_PERIOD_HPR 0x001a4 +#define RK3399_PWM_DUTY_LPR 0x001a8 +#define RK3399_PWM_CNT 0x001ac +#define RK3399_BCSH_COLOR_BAR 0x001b0 +#define RK3399_BCSH_BCS 0x001b4 +#define RK3399_BCSH_H 0x001b8 +#define RK3399_BCSH_CTRL 0x001bc +#define RK3399_CABC_CTRL0 0x001c0 +#define RK3399_CABC_CTRL1 0x001c4 +#define RK3399_CABC_CTRL2 0x001c8 +#define RK3399_CABC_CTRL3 0x001cc +#define RK3399_CABC_GAUSS_LINE0_0 0x001d0 +#define RK3399_CABC_GAUSS_LINE0_1 0x001d4 +#define RK3399_CABC_GAUSS_LINE1_0 0x001d8 +#define RK3399_CABC_GAUSS_LINE1_1 0x001dc +#define RK3399_CABC_GAUSS_LINE2_0 0x001e0 +#define RK3399_CABC_GAUSS_LINE2_1 0x001e4 +#define RK3399_FRC_LOWER01_0 0x001e8 +#define RK3399_FRC_LOWER01_1 0x001ec +#define RK3399_FRC_LOWER10_0 0x001f0 +#define RK3399_FRC_LOWER10_1 0x001f4 +#define RK3399_FRC_LOWER11_0 0x001f8 +#define RK3399_FRC_LOWER11_1 0x001fc +#define RK3399_AFBCD0_CTRL 0x00200 +#define RK3399_AFBCD0_HDR_PTR 0x00204 +#define RK3399_AFBCD0_PIC_SIZE 0x00208 +#define RK3399_AFBCD0_STATUS 0x0020c +#define RK3399_AFBCD1_CTRL 0x00220 +#define RK3399_AFBCD1_HDR_PTR 0x00224 +#define RK3399_AFBCD1_PIC_SIZE 0x00228 +#define RK3399_AFBCD1_STATUS 0x0022c +#define RK3399_AFBCD2_CTRL 0x00240 +#define RK3399_AFBCD2_HDR_PTR 0x00244 +#define RK3399_AFBCD2_PIC_SIZE 0x00248 +#define RK3399_AFBCD2_STATUS 0x0024c +#define RK3399_AFBCD3_CTRL 0x00260 +#define RK3399_AFBCD3_HDR_PTR 0x00264 +#define RK3399_AFBCD3_PIC_SIZE 0x00268 +#define RK3399_AFBCD3_STATUS 0x0026c +#define RK3399_INTR_EN0 0x00280 +#define RK3399_INTR_CLEAR0 0x00284 +#define RK3399_INTR_STATUS0 0x00288 +#define RK3399_INTR_RAW_STATUS0 0x0028c +#define RK3399_INTR_EN1 0x00290 +#define RK3399_INTR_CLEAR1 0x00294 +#define RK3399_INTR_STATUS1 0x00298 +#define RK3399_INTR_RAW_STATUS1 0x0029c +#define RK3399_LINE_FLAG 0x002a0 +#define RK3399_VOP_STATUS 0x002a4 +#define RK3399_BLANKING_VALUE 0x002a8 +#define RK3399_MCU_BYPASS_PORT 0x002ac +#define RK3399_WIN0_DSP_BG 0x002b0 +#define RK3399_WIN1_DSP_BG 0x002b4 +#define RK3399_WIN2_DSP_BG 0x002b8 +#define RK3399_WIN3_DSP_BG 0x002bc +#define RK3399_YUV2YUV_WIN 0x002c0 +#define RK3399_YUV2YUV_POST 0x002c4 +#define RK3399_AUTO_GATING_EN 0x002cc +#define RK3399_WIN0_CSC_COE 
0x003a0 +#define RK3399_WIN1_CSC_COE 0x003c0 +#define RK3399_WIN2_CSC_COE 0x003e0 +#define RK3399_WIN3_CSC_COE 0x00400 +#define RK3399_HWC_CSC_COE 0x00420 +#define RK3399_BCSH_R2Y_CSC_COE 0x00440 +#define RK3399_BCSH_Y2R_CSC_COE 0x00460 +#define RK3399_POST_YUV2YUV_Y2R_COE 0x00480 +#define RK3399_POST_YUV2YUV_3X3_COE 0x004a0 +#define RK3399_POST_YUV2YUV_R2Y_COE 0x004c0 +#define RK3399_WIN0_YUV2YUV_Y2R 0x004e0 +#define RK3399_WIN0_YUV2YUV_3X3 0x00500 +#define RK3399_WIN0_YUV2YUV_R2Y 0x00520 +#define RK3399_WIN1_YUV2YUV_Y2R 0x00540 +#define RK3399_WIN1_YUV2YUV_3X3 0x00560 +#define RK3399_WIN1_YUV2YUV_R2Y 0x00580 +#define RK3399_WIN2_YUV2YUV_Y2R 0x005a0 +#define RK3399_WIN2_YUV2YUV_3X3 0x005c0 +#define RK3399_WIN2_YUV2YUV_R2Y 0x005e0 +#define RK3399_WIN3_YUV2YUV_Y2R 0x00600 +#define RK3399_WIN3_YUV2YUV_3X3 0x00620 +#define RK3399_WIN3_YUV2YUV_R2Y 0x00640 +#define RK3399_WIN2_LUT_ADDR 0x01000 +#define RK3399_WIN3_LUT_ADDR 0x01400 +#define RK3399_HWC_LUT_ADDR 0x01800 +#define RK3399_CABC_GAMMA_LUT_ADDR 0x01c00 +#define RK3399_GAMMA_LUT_ADDR 0x02000 +/* rk3399 register definition end */ + #endif /* _ROCKCHIP_VOP_REG_H */ diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 42c074a..c2a30bd 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -354,14 +354,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu, - mem); + ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index f157a9e..bf6e216 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,8 +45,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool interruptible, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; @@ -329,8 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index a1803fb..29855be 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); + +uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) +{ + return glob->zone_kernel->max_mem; +} +EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
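Returning to the rk3399 VOP additions above: registers declared with VOP_REG_MASK use the hiword-mask layout handled by the new write_mask path in vop_mask_write() — field value in the low 16 bits, per-bit write enables in the high 16 bits — so individual fields can be programmed without a read-modify-write cycle or the regsbak shadow copy. A minimal standalone sketch of that encoding (the helper name is illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Build the word for a hiword-mask register: value bits in [15:0],
 * matching write-enable bits in [31:16].  Mirrors the write_mask branch
 * of vop_mask_write(): ((v << shift) & 0xffff) | (mask << (shift + 16)). */
static uint32_t hiword_mask_val(uint32_t v, uint32_t mask, uint32_t shift)
{
	return ((v << shift) & 0xffff) | (mask << (shift + 16));
}

int main(void)
{
	/* cfg_done: 1-bit field at bit 0 -> 0x00010001 */
	printf("0x%08x\n", hiword_mask_val(1, 0x1, 0));
	/* a 4-bit polarity field at shift 4, value 0xa -> 0x00f000a0 */
	printf("0x%08x\n", hiword_mask_val(0xa, 0xf, 4));
	return 0;
}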