Diffstat (limited to 'drivers/gpu')
176 files changed, 11492 insertions, 2065 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 151a050..47f2ce8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -165,6 +165,15 @@ config DRM_SAVAGE
 	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
 	  chipset. If M is selected the module will be called savage.
 
+config DRM_VGEM
+	tristate "Virtual GEM provider"
+	depends on DRM
+	help
+	  Choose this option to get a virtual graphics memory manager,
+	  as used by Mesa's software renderer for enhanced performance.
+	  If M is selected the module will be called vgem.
+
+
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/rockchip/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2c239b9..7d4944e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
+obj-$(CONFIG_DRM_VGEM)	+= vgem/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5c50aa8..19a4fba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -435,21 +435,22 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 {
 	struct kfd_ioctl_get_clock_counters_args *args = data;
 	struct kfd_dev *dev;
-	struct timespec time;
+	struct timespec64 time;
 
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL)
 		return -EINVAL;
 
 	/* Reading GPU clock counter from KGD */
-	args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+	args->gpu_clock_counter =
+		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
 	/* No access to rdtsc. Using raw monotonic time */
-	getrawmonotonic(&time);
-	args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+	getrawmonotonic64(&time);
+	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
 
-	get_monotonic_boottime(&time);
-	args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
+	get_monotonic_boottime64(&time);
+	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
 
 	/* Since the counter is in nano-seconds we use 1GHz frequency */
 	args->system_clock_freq = 1000000000;
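
The getrawmonotonic64()/timespec64_to_ns() switch above is the usual y2038 conversion: struct timespec carries a 32-bit time_t seconds field on 32-bit kernels, while struct timespec64 always carries 64 bits. A minimal userspace sketch of the same nanosecond conversion; clock_gettime() here stands in for the kernel accessors and is an assumption of this example, not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* Mirrors timespec64_to_ns(): widen the seconds to 64 bits *before*
 * the multiply so nothing overflows after 2038. */
static uint64_t to_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * NSEC_PER_SEC + (uint64_t)ts->tv_nsec;
}

int main(void)
{
	struct timespec ts;

	/* CLOCK_MONOTONIC_RAW is the userspace analogue of the
	 * getrawmonotonic64() call in kfd_ioctl_get_clock_counters() */
	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	printf("raw monotonic: %llu ns\n", (unsigned long long)to_ns(&ts));
	return 0;
}
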
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 5bc32c2..ca7f2d3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -94,7 +94,8 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
 	return NULL;
 }
 
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
 {
 	struct kfd_dev *kfd;
 
@@ -112,6 +113,11 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
 	kfd->device_info = device_info;
 	kfd->pdev = pdev;
 	kfd->init_complete = false;
+	kfd->kfd2kgd = f2g;
+
+	mutex_init(&kfd->doorbell_mutex);
+	memset(&kfd->doorbell_available_index, 0,
+		sizeof(kfd->doorbell_available_index));
 
 	return kfd;
 }
@@ -200,8 +206,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	/* add another 512KB for all other allocations on gart (HPD, fences) */
 	size += 512 * 1024;
 
-	if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
-			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
+	if (kfd->kfd2kgd->init_gtt_mem_allocation(
+			kfd->kgd, size, &kfd->gtt_mem,
+			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
 		dev_err(kfd_device,
 			"Could not allocate %d bytes for device (%x:%x)\n",
 			size, kfd->pdev->vendor, kfd->pdev->device);
@@ -270,7 +277,7 @@ device_iommu_pasid_error:
 kfd_topology_add_device_error:
 	kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
-	kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
 		"device (%x:%x) NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
@@ -285,7 +292,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 		amd_iommu_free_device(kfd->pdev);
 		kfd_topology_remove_device(kfd);
 		kfd_gtt_sa_fini(kfd);
-		kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	}
 
 	kfree(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d8135ad..69af73f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -82,7 +82,8 @@ static inline unsigned int get_pipes_num_cpsch(void)
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
-	return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+	return dqm->dev->kfd2kgd->program_sh_mem_settings(
+						dqm->dev->kgd, qpd->vmid,
 						qpd->sh_mem_config,
 						qpd->sh_mem_ape1_base,
 						qpd->sh_mem_ape1_limit,
@@ -457,9 +458,12 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 {
 	uint32_t pasid_mapping;
 
-	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
-						ATC_VMID_PASID_MAPPING_VALID;
-	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+	pasid_mapping = (pasid == 0) ?
+			0 :
+			(uint32_t)pasid |
+			ATC_VMID_PASID_MAPPING_VALID;
+
+	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+						dqm->dev->kgd, pasid_mapping,
 						vmid);
 }
 
@@ -511,7 +515,7 @@ int init_pipelines(struct device_queue_manager *dqm,
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
+		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}
 
@@ -905,7 +909,7 @@ out:
 	return retval;
 }
 
-static int fence_wait_timeout(unsigned int *fence_addr,
+static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 				unsigned int fence_value,
 				unsigned long timeout)
 {
@@ -961,7 +965,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
 				KFD_FENCE_COMPLETED);
 	/* should be timed out */
-	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+	amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
 	pm_release_ib(&dqm->packets);
 	dqm->active_runlist = false;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 1a9b355..17e56dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -32,9 +32,6 @@
  * and that's assures that any user process won't get access to the
  * kernel doorbells page
  */
-static DEFINE_MUTEX(doorbell_mutex);
-static unsigned long doorbell_available_index[
-	DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
 
 #define KERNEL_DOORBELL_PASID 1
 #define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
@@ -170,12 +167,12 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 
 	BUG_ON(!kfd || !doorbell_off);
 
-	mutex_lock(&doorbell_mutex);
-	inx = find_first_zero_bit(doorbell_available_index,
+	mutex_lock(&kfd->doorbell_mutex);
+	inx = find_first_zero_bit(kfd->doorbell_available_index,
 					KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
-	__set_bit(inx, doorbell_available_index);
-	mutex_unlock(&doorbell_mutex);
+	__set_bit(inx, kfd->doorbell_available_index);
+	mutex_unlock(&kfd->doorbell_mutex);
 
 	if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 		return NULL;
@@ -203,9 +200,9 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
 
 	inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
 
-	mutex_lock(&doorbell_mutex);
-	__clear_bit(inx, doorbell_available_index);
-	mutex_unlock(&doorbell_mutex);
+	mutex_lock(&kfd->doorbell_mutex);
+	__clear_bit(inx, kfd->doorbell_available_index);
+	mutex_unlock(&kfd->doorbell_mutex);
 }
 
 inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 3f34ae1..4e0a68f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -34,7 +34,6 @@
 #define KFD_DRIVER_MINOR	7
 #define KFD_DRIVER_PATCHLEVEL	1
 
-const struct kfd2kgd_calls *kfd2kgd;
 static const struct kgd2kfd_calls kgd2kfd = {
 	.exit		= kgd2kfd_exit,
 	.probe		= kgd2kfd_probe,
@@ -55,9 +54,7 @@ module_param(max_num_of_queues_per_device, int, 0444);
 MODULE_PARM_DESC(max_num_of_queues_per_device,
 	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
-bool kgd2kfd_init(unsigned interface_version,
-		  const struct kfd2kgd_calls *f2g,
-		  const struct kgd2kfd_calls **g2f)
+bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
 {
 	/*
	 * Only one interface version is supported,
@@ -66,11 +63,6 @@ bool kgd2kfd_init(unsigned interface_version,
 	if (interface_version != KFD_INTERFACE_VERSION)
 		return false;
 
-	/* Protection against multiple amd kgd loads */
-	if (kfd2kgd)
-		return true;
-
-	kfd2kgd = f2g;
 	*g2f = &kgd2kfd;
 
 	return true;
@@ -85,8 +77,6 @@ static int __init kfd_module_init(void)
 {
 	int err;
 
-	kfd2kgd = NULL;
-
 	/* Verify module parameters */
 	if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
 	    (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index a09e18a..4349794 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -151,14 +151,15 @@ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
 			uint32_t queue_id, uint32_t __user *wptr)
 {
-	return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+	return mm->dev->kfd2kgd->hqd_load
+		(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
 }
 
 static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t __user *wptr)
 {
-	return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
 }
 
 static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -245,7 +246,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
 			unsigned int timeout, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
 					pipe_id, queue_id);
 }
 
@@ -258,7 +259,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
 				unsigned int timeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
 }
 
 static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -266,7 +267,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
 			uint32_t queue_id)
 {
 
-	return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
 					pipe_id, queue_id);
 
 }
 
@@ -275,7 +276,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
 			uint64_t queue_address,	uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5a44f2f..f21fcce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -148,6 +148,11 @@ struct kfd_dev {
 
 	struct kgd2kfd_shared_resources shared_resources;
 
+	const struct kfd2kgd_calls *kfd2kgd;
+	struct mutex doorbell_mutex;
+	unsigned long doorbell_available_index[DIV_ROUND_UP(
+		KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
 	void *gtt_mem;
 	uint64_t gtt_start_gpu_addr;
 	void *gtt_start_cpu_ptr;
@@ -164,13 +169,12 @@ struct kfd_dev {
 
 /* KGD2KFD callbacks */
 void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 const struct kgd2kfd_shared_resources *gpu_resources);
+			const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 
-extern const struct kfd2kgd_calls *kfd2kgd;
-
 enum kfd_mempool {
 	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
 	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -378,8 +382,6 @@ struct qcm_process_device {
 	/* The Device Queue Manager that owns this data */
 	struct device_queue_manager *dqm;
 	struct process_queue_manager *pqm;
-	/* Device Queue Manager lock */
-	struct mutex *lock;
 	/* Queues list */
 	struct list_head queues_list;
 	struct list_head priv_queue_list;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a369c14..945d622 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -162,10 +162,16 @@ static void kfd_process_wq_release(struct work_struct *work)
 
 	p = my_work->p;
 
+	pr_debug("Releasing process (pasid %d) in workqueue\n",
+			p->pasid);
+
 	mutex_lock(&p->mutex);
 
 	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
 							per_device_list) {
+		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
+				pdd->dev->id, p->pasid);
+
 		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
 		list_del(&pdd->per_device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 4983993..661c660 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -726,13 +726,14 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 		}
 
 		sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
-				kfd2kgd->get_max_engine_clock_in_mhz(
+			dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
 					dev->gpu->kgd));
 
 		sysfs_show_64bit_prop(buffer, "local_mem_size",
-				kfd2kgd->get_vmem_size(dev->gpu->kgd));
+				dev->gpu->kfd2kgd->get_vmem_size(
+						dev->gpu->kgd));
 
 		sysfs_show_32bit_prop(buffer, "fw_version",
-				kfd2kgd->get_fw_version(
+			dev->gpu->kfd2kgd->get_fw_version(
 						dev->gpu->kgd,
 						KGD_ENGINE_MEC1));
 	}
@@ -1099,8 +1100,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 	buf[2] = gpu->pdev->subsystem_device;
 	buf[3] = gpu->pdev->device;
 	buf[4] = gpu->pdev->bus->number;
-	buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
-	buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+	buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
+			& 0xffffffff);
+	buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
 
 	for (i = 0, hashout = 0; i < 7; i++)
 		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 239bc16..dabd944 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -77,37 +77,6 @@ struct kgd2kfd_shared_resources {
 };
 
 /**
- * struct kgd2kfd_calls
- *
- * @exit: Notifies amdkfd that kgd module is unloaded
- *
- * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
- *
- * @device_init: Initialize the newly probed device (if it is a device that
- * amdkfd supports)
- *
- * @device_exit: Notifies amdkfd about a removal of a kgd device
- *
- * @suspend: Notifies amdkfd about a suspend action done to a kgd device
- *
- * @resume: Notifies amdkfd about a resume action done to a kgd device
- *
- * This structure contains function callback pointers so the kgd driver
- * will notify to the amdkfd about certain status changes.
- *
- */
-struct kgd2kfd_calls {
-	void (*exit)(void);
-	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
-	bool (*device_init)(struct kfd_dev *kfd,
-			const struct kgd2kfd_shared_resources *gpu_resources);
-	void (*device_exit)(struct kfd_dev *kfd);
-	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
-	void (*suspend)(struct kfd_dev *kfd);
-	int (*resume)(struct kfd_dev *kfd);
-};
-
-/**
  * struct kfd2kgd_calls
  *
  * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
@@ -196,8 +165,39 @@ struct kfd2kgd_calls {
 				enum kgd_engine_type type);
 };
 
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers so the kgd driver
+ * will notify amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+	void (*exit)(void);
+	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
+		const struct kfd2kgd_calls *f2g);
+	bool (*device_init)(struct kfd_dev *kfd,
+			const struct kgd2kfd_shared_resources *gpu_resources);
+	void (*device_exit)(struct kfd_dev *kfd);
+	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+	void (*suspend)(struct kfd_dev *kfd);
+	int (*resume)(struct kfd_dev *kfd);
+};
+
 bool kgd2kfd_init(unsigned interface_version,
-		  const struct kfd2kgd_calls *f2g,
 		  const struct kgd2kfd_calls **g2f);
 
 #endif /* KGD_KFD_INTERFACE_H_INCLUDED */
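
Taken together, the amdkfd hunks above replace the single module-global kfd2kgd pointer with a per-device table stored in struct kfd_dev, so more than one kgd driver can be loaded at a time, each handing its own callbacks to kgd2kfd_probe(). A compile-only sketch of that pattern with invented names, not the real amdkfd types:

/* Invented minimal types; the real interface is kfd2kgd_calls/kfd_dev. */
struct hw_dev;				/* opaque lower-driver handle */

struct hw_ops {
	unsigned long (*get_vmem_size)(struct hw_dev *hw);
};

struct upper_dev {
	struct hw_dev *hw;
	const struct hw_ops *ops;	/* per device, not a global */
};

/* Each lower driver passes its own table at probe time, just as
 * kgd2kfd_probe() now takes const struct kfd2kgd_calls *f2g. */
static void upper_probe(struct upper_dev *dev, struct hw_dev *hw,
			const struct hw_ops *ops)
{
	dev->hw = hw;
	dev->ops = ops;
}

/* Every later call goes through the device, mirroring the
 * kfd2kgd->foo() -> dev->kfd2kgd->foo() rewrites in this patch. */
static unsigned long upper_query_vmem(struct upper_dev *dev)
{
	return dev->ops->get_vmem_size(dev->hw);
}
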
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index d55c0c2..f69b925 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -207,6 +207,27 @@ static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
 	crtc->enabled = true;
 }
 
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *c)
+{
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	if (crtc->enabled) {
+		atmel_hlcdc_crtc_disable(c);
+		/* save enable state for resume */
+		crtc->enabled = true;
+	}
+}
+
+void atmel_hlcdc_crtc_resume(struct drm_crtc *c)
+{
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	if (crtc->enabled) {
+		crtc->enabled = false;
+		atmel_hlcdc_crtc_enable(c);
+	}
+}
+
 static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 					 struct drm_crtc_state *s)
 {
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c4bb1f9..60b0c13 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -569,14 +569,8 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 		return 0;
 
 	drm_modeset_lock_all(drm_dev);
-	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
-		if (crtc->enabled) {
-			crtc_funcs->disable(crtc);
-			/* save enable state for resume */
-			crtc->enabled = true;
-		}
-	}
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+		atmel_hlcdc_crtc_suspend(crtc);
 	drm_modeset_unlock_all(drm_dev);
 	return 0;
 }
@@ -590,13 +584,8 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
 		return 0;
 
 	drm_modeset_lock_all(drm_dev);
-	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
-		if (crtc->enabled) {
-			crtc->enabled = false;
-			crtc_funcs->enable(crtc);
-		}
-	}
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+		atmel_hlcdc_crtc_resume(crtc);
 	drm_modeset_unlock_all(drm_dev);
 	return 0;
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 1ea9c2c..cf6b375 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -155,6 +155,9 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
 void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
 				       struct drm_file *file);
 
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
+void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
+
 int atmel_hlcdc_crtc_create(struct drm_device *dev);
 
 int atmel_hlcdc_create_outputs(struct drm_device *dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 4603897..a39b034 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -164,6 +164,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
 
 	bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
 
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,      0);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,         bochs->bpp);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES,        bochs->xres);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES,        bochs->yres);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index f38bbcd..acef322 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -11,3 +11,14 @@ config DRM_PTN3460
 	select DRM_PANEL
 	---help---
 	  ptn3460 eDP-LVDS bridge chip driver.
+
+config DRM_PS8622
+	tristate "Parade eDP/LVDS bridge"
+	depends on DRM
+	depends on OF
+	select DRM_PANEL
+	select DRM_KMS_HELPER
+	select BACKLIGHT_LCD_SUPPORT
+	select BACKLIGHT_CLASS_DEVICE
+	---help---
+	  parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d8a8cfd..8dfebd9 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,5 @@
 ccflags-y := -Iinclude/drm
 
+obj-$(CONFIG_DRM_PS8622) += ps8622.o
 obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c
new file mode 100644
index 0000000..e895aa7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ps8622.c
@@ -0,0 +1,684 @@
+/*
+ * Parade PS8622 eDP/LVDS bridge driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/fb.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/pm.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_panel.h> + +#include "drmP.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +/* Brightness scale on the Parade chip */ +#define PS8622_MAX_BRIGHTNESS 0xff + +/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */ +#define PS8622_POWER_RISE_T1_MIN_US 10 +#define PS8622_POWER_RISE_T1_MAX_US 10000 +#define PS8622_RST_HIGH_T2_MIN_US 3000 +#define PS8622_RST_HIGH_T2_MAX_US 30000 +#define PS8622_PWMO_END_T12_MS 200 +#define PS8622_POWER_FALL_T16_MAX_US 10000 +#define PS8622_POWER_OFF_T17_MS 500 + +#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \ + (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US)) +#error "T2.min + T1.max must be less than T2.max + T1.min" +#endif + +struct ps8622_bridge { + struct drm_connector connector; + struct i2c_client *client; + struct drm_bridge bridge; + struct drm_panel *panel; + struct regulator *v12; + struct backlight_device *bl; + + struct gpio_desc *gpio_slp; + struct gpio_desc *gpio_rst; + + u32 max_lane_count; + u32 lane_count; + + bool enabled; +}; + +static inline struct ps8622_bridge * + bridge_to_ps8622(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ps8622_bridge, bridge); +} + +static inline struct ps8622_bridge * + connector_to_ps8622(struct drm_connector *connector) +{ + return container_of(connector, struct ps8622_bridge, connector); +} + +static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val) +{ + int ret; + struct i2c_adapter *adap = client->adapter; + struct i2c_msg msg; + u8 data[] = {reg, val}; + + msg.addr = client->addr + page; + msg.flags = 0; + msg.len = sizeof(data); + msg.buf = data; + + ret = i2c_transfer(adap, &msg, 1); + if (ret != 1) + pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n", + client->addr + page, reg, val, ret); + return !(ret == 1); +} + +static int ps8622_send_config(struct ps8622_bridge *ps8622) +{ + struct i2c_client *cl = ps8622->client; + int err = 0; + + /* HPD low */ + err = ps8622_set(cl, 0x02, 0xa1, 0x01); + if (err) + goto error; + + /* SW setting: [1:0] SW output 1.2V voltage is lower to 96% */ + err = ps8622_set(cl, 0x04, 0x14, 0x01); + if (err) + goto error; + + /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */ + err = ps8622_set(cl, 0x04, 0xe3, 0x20); + if (err) + goto error; + + /* [7] RCO SS enable */ + err = ps8622_set(cl, 0x04, 0xe2, 0x80); + if (err) + goto error; + + /* RPHY Setting + * [3:2] CDR tune wait cycle before measure for fine tune + * b00: 1us b01: 0.5us b10:2us, b11: 4us + */ + err = ps8622_set(cl, 0x04, 0x8a, 0x0c); + if (err) + goto error; + + /* [3] RFD always on */ + err = ps8622_set(cl, 0x04, 0x89, 0x08); + if (err) + goto error; + + /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. 
+	err = ps8622_set(cl, 0x04, 0x71, 0x2d);
+	if (err)
+		goto error;
+
+	/* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */
+	err = ps8622_set(cl, 0x04, 0x7d, 0x07);
+	if (err)
+		goto error;
+
+	/* [1:0] Fmin=+4bands */
+	err = ps8622_set(cl, 0x04, 0x7b, 0x00);
+	if (err)
+		goto error;
+
+	/* [7:5] DCO_FTRNG=+-40% */
+	err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
+	if (err)
+		goto error;
+
+	/* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */
+	err = ps8622_set(cl, 0x04, 0xc0, 0x12);
+	if (err)
+		goto error;
+
+	/* Gitune=-37% */
+	err = ps8622_set(cl, 0x04, 0xc1, 0x92);
+	if (err)
+		goto error;
+
+	/* Fbstep=100% */
+	err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
+	if (err)
+		goto error;
+
+	/* [7] LOS signal disable */
+	err = ps8622_set(cl, 0x04, 0x32, 0x80);
+	if (err)
+		goto error;
+
+	/* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */
+	err = ps8622_set(cl, 0x04, 0x00, 0xb0);
+	if (err)
+		goto error;
+
+	/* [7:6] Right-bar GPIO output strength is 8mA */
+	err = ps8622_set(cl, 0x04, 0x15, 0x40);
+	if (err)
+		goto error;
+
+	/* EQ Training State Machine Setting, RCO calibration start */
+	err = ps8622_set(cl, 0x04, 0x54, 0x10);
+	if (err)
+		goto error;
+
+	/* Logic, needs more than 10 I2C command */
+	/* [4:0] MAX_LANE_COUNT set to max supported lanes */
+	err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
+	if (err)
+		goto error;
+
+	/* [4:0] LANE_COUNT_SET set to chosen lane count */
+	err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
+	if (err)
+		goto error;
+
+	err = ps8622_set(cl, 0x00, 0x52, 0x20);
+	if (err)
+		goto error;
+
+	/* HPD CP toggle enable */
+	err = ps8622_set(cl, 0x00, 0xf1, 0x03);
+	if (err)
+		goto error;
+
+	err = ps8622_set(cl, 0x00, 0x62, 0x41);
+	if (err)
+		goto error;
+
+	/* Counter number, add 1ms counter delay */
+	err = ps8622_set(cl, 0x00, 0xf6, 0x01);
+	if (err)
+		goto error;
+
+	/* [6]PWM function control by DPCD0040f[7], default is PWM block */
+	err = ps8622_set(cl, 0x00, 0x77, 0x06);
+	if (err)
+		goto error;
+
+	/* 04h Adjust VTotal tolerance to fix the 30Hz no display issue */
+	err = ps8622_set(cl, 0x00, 0x4c, 0x04);
+	if (err)
+		goto error;
+
+	/* DPCD00400='h00, Parade OUI ='h001cf8 */
+	err = ps8622_set(cl, 0x01, 0xc0, 0x00);
+	if (err)
+		goto error;
+
+	/* DPCD00401='h1c */
+	err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
+	if (err)
+		goto error;
+
+	/* DPCD00402='hf8 */
+	err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
+	if (err)
+		goto error;
+
+	/* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */
+	err = ps8622_set(cl, 0x01, 0xc3, 0x44);
+	if (err)
+		goto error;
+
+	/* DPCD404 */
+	err = ps8622_set(cl, 0x01, 0xc4, 0x32);
+	if (err)
+		goto error;
+
+	/* DPCD405 */
+	err = ps8622_set(cl, 0x01, 0xc5, 0x53);
+	if (err)
+		goto error;
+
+	/* DPCD406 */
+	err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
+	if (err)
+		goto error;
+
+	/* DPCD407 */
+	err = ps8622_set(cl, 0x01, 0xc7, 0x56);
+	if (err)
+		goto error;
+
+	/* DPCD408 */
+	err = ps8622_set(cl, 0x01, 0xc8, 0x35);
+	if (err)
+		goto error;
+
+	/* DPCD40A, Initial Code major revision '01' */
+	err = ps8622_set(cl, 0x01, 0xca, 0x01);
+	if (err)
+		goto error;
+
+	/* DPCD40B, Initial Code minor revision '05' */
+	err = ps8622_set(cl, 0x01, 0xcb, 0x05);
+	if (err)
+		goto error;
+
+
+	if (ps8622->bl) {
+		/* DPCD720, internal PWM */
+		err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
+		if (err)
+			goto error;
+
+		/* FFh for 100% brightness, 0h for 0% brightness */
+		err = ps8622_set(cl, 0x01, 0xa7,
+				ps8622->bl->props.brightness);
+		if (err)
+			goto error;
+	} else {
+		/* DPCD720, external PWM */
+		err = ps8622_set(cl, 0x01, 0xa5, 0x80);
+		if (err)
+			goto error;
+	}
+
+	/* Set LVDS output as 6bit-VESA mapping, single LVDS channel */
+	err = ps8622_set(cl, 0x01, 0xcc, 0x13);
+	if (err)
+		goto error;
+
+	/* Enable SSC set by register */
+	err = ps8622_set(cl, 0x02, 0xb1, 0x20);
+	if (err)
+		goto error;
+
+	/* Set SSC enabled and +/-1% central spreading */
+	err = ps8622_set(cl, 0x04, 0x10, 0x16);
+	if (err)
+		goto error;
+
+	/* Logic end */
+	/* MPU Clock source: LC => RCO */
+	err = ps8622_set(cl, 0x04, 0x59, 0x60);
+	if (err)
+		goto error;
+
+	/* LC -> RCO */
+	err = ps8622_set(cl, 0x04, 0x54, 0x14);
+	if (err)
+		goto error;
+
+	/* HPD high */
+	err = ps8622_set(cl, 0x02, 0xa1, 0x91);
+
+error:
+	return err ? -EIO : 0;
+}
+
+static int ps8622_backlight_update(struct backlight_device *bl)
+{
+	struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
+	int ret, brightness = bl->props.brightness;
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+		brightness = 0;
+
+	if (!ps8622->enabled)
+		return -EINVAL;
+
+	ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness);
+
+	return ret;
+}
+
+static const struct backlight_ops ps8622_backlight_ops = {
+	.update_status	= ps8622_backlight_update,
+};
+
+static void ps8622_pre_enable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+	int ret;
+
+	if (ps8622->enabled)
+		return;
+
+	gpiod_set_value(ps8622->gpio_rst, 0);
+
+	if (ps8622->v12) {
+		ret = regulator_enable(ps8622->v12);
+		if (ret)
+			DRM_ERROR("failed to enable ps8622->v12");
+	}
+
+	if (drm_panel_prepare(ps8622->panel)) {
+		DRM_ERROR("failed to prepare panel\n");
+		return;
+	}
+
+	gpiod_set_value(ps8622->gpio_slp, 1);
+
+	/*
+	 * T1 is the range of time that it takes for the power to rise after we
+	 * enable the lcd/ps8622 fet. T2 is the range of time in which the
+	 * data sheet specifies we should deassert the reset pin.
+	 *
+	 * If it takes T1.max for the power to rise, we need to wait at least
+	 * T2.min before deasserting the reset pin. If it takes T1.min for the
+	 * power to rise, we need to wait at most T2.max before deasserting the
+	 * reset pin.
+	 */
+	usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
+		     PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
+
+	gpiod_set_value(ps8622->gpio_rst, 1);
+
+	/* wait 20ms after RST high */
+	usleep_range(20000, 30000);
+
+	ret = ps8622_send_config(ps8622);
+	if (ret) {
+		DRM_ERROR("Failed to send config to bridge (%d)\n", ret);
+		return;
+	}
+
+	ps8622->enabled = true;
+}
+
+static void ps8622_enable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (drm_panel_enable(ps8622->panel)) {
+		DRM_ERROR("failed to enable panel\n");
+		return;
+	}
+}
+
+static void ps8622_disable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (drm_panel_disable(ps8622->panel)) {
+		DRM_ERROR("failed to disable panel\n");
+		return;
+	}
+	msleep(PS8622_PWMO_END_T12_MS);
+}
+
+static void ps8622_post_disable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (!ps8622->enabled)
+		return;
+
+	ps8622->enabled = false;
+
+	/*
+	 * This doesn't matter if the regulators are turned off, but something
+	 * else might keep them on. In that case, we want to assert the slp
+	 * gpio to lower power.
+	 */
+ */ + gpiod_set_value(ps8622->gpio_slp, 0); + + if (drm_panel_unprepare(ps8622->panel)) { + DRM_ERROR("failed to unprepare panel\n"); + return; + } + + if (ps8622->v12) + regulator_disable(ps8622->v12); + + /* + * Sleep for at least the amount of time that it takes the power rail to + * fall to prevent asserting the rst gpio from doing anything. + */ + usleep_range(PS8622_POWER_FALL_T16_MAX_US, + 2 * PS8622_POWER_FALL_T16_MAX_US); + gpiod_set_value(ps8622->gpio_rst, 0); + + msleep(PS8622_POWER_OFF_T17_MS); +} + +static int ps8622_get_modes(struct drm_connector *connector) +{ + struct ps8622_bridge *ps8622; + + ps8622 = connector_to_ps8622(connector); + + return drm_panel_get_modes(ps8622->panel); +} + +static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) +{ + struct ps8622_bridge *ps8622; + + ps8622 = connector_to_ps8622(connector); + + return ps8622->bridge.encoder; +} + +static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { + .get_modes = ps8622_get_modes, + .best_encoder = ps8622_best_encoder, +}; + +static enum drm_connector_status ps8622_detect(struct drm_connector *connector, + bool force) +{ + return connector_status_connected; +} + +static void ps8622_connector_destroy(struct drm_connector *connector) +{ + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs ps8622_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = ps8622_detect, + .destroy = ps8622_connector_destroy, +}; + +static int ps8622_attach(struct drm_bridge *bridge) +{ + struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(bridge->dev, &ps8622->connector, + &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + drm_connector_helper_add(&ps8622->connector, + &ps8622_connector_helper_funcs); + drm_connector_register(&ps8622->connector); + drm_mode_connector_attach_encoder(&ps8622->connector, + bridge->encoder); + + if (ps8622->panel) + drm_panel_attach(ps8622->panel, &ps8622->connector); + + drm_helper_hpd_irq_event(ps8622->connector.dev); + + return ret; +} + +static const struct drm_bridge_funcs ps8622_bridge_funcs = { + .pre_enable = ps8622_pre_enable, + .enable = ps8622_enable, + .disable = ps8622_disable, + .post_disable = ps8622_post_disable, + .attach = ps8622_attach, +}; + +static const struct of_device_id ps8622_devices[] = { + {.compatible = "parade,ps8622",}, + {.compatible = "parade,ps8625",}, + {} +}; +MODULE_DEVICE_TABLE(of, ps8622_devices); + +static int ps8622_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *endpoint, *panel_node; + struct ps8622_bridge *ps8622; + int ret; + + ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL); + if (!ps8622) + return -ENOMEM; + + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + ps8622->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!ps8622->panel) + return -EPROBE_DEFER; + } + } + + ps8622->client = client; + + ps8622->v12 = devm_regulator_get(dev, "vdd12"); + if (IS_ERR(ps8622->v12)) { + dev_info(dev, "no 1.2v regulator found 
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 826833e..9d2f053 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -265,7 +265,7 @@ static struct drm_connector_funcs ptn3460_connector_funcs = {
 	.destroy = ptn3460_connector_destroy,
 };
 
-int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge)
 {
 	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
 	int ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index a6caaae..57efdbe 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 		connector->funcs->atomic_destroy_state(connector,
 						       state->connector_states[i]);
+		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
 	}
 
@@ -145,6 +146,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 		crtc->funcs->atomic_destroy_state(crtc,
 						  state->crtc_states[i]);
+		state->crtcs[i] = NULL;
 		state->crtc_states[i] = NULL;
 	}
 
@@ -156,6 +158,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 		plane->funcs->atomic_destroy_state(plane,
 						   state->plane_states[i]);
+		state->planes[i] = NULL;
 		state->plane_states[i] = NULL;
 	}
 }
@@ -170,6 +173,9 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
 */
 void drm_atomic_state_free(struct drm_atomic_state *state)
 {
+	if (!state)
+		return;
+
 	drm_atomic_state_clear(state);
 
 	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
@@ -248,11 +254,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 	struct drm_mode_config *config = &dev->mode_config;
 
 	/* FIXME: Mode prop is missing, which also controls ->enable. */
-	if (property == config->prop_active) {
+	if (property == config->prop_active)
 		state->active = val;
-	} else if (crtc->funcs->atomic_set_property)
+	else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state,
 							property, val);
-	return -EINVAL;
+	else
+		return -EINVAL;
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
@@ -266,9 +275,17 @@ int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		const struct drm_crtc_state *state,
 		struct drm_property *property, uint64_t *val)
 {
-	if (crtc->funcs->atomic_get_property)
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	if (property == config->prop_active)
+		*val = state->active;
+	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state,
 							property, val);
-	return -EINVAL;
+	else
+		return -EINVAL;
+
+	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index a745881..41c38ed 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state,
 static int
 update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 {
-	struct drm_connector_helper_funcs *funcs;
+	const struct drm_connector_helper_funcs *funcs;
 	struct drm_encoder *new_encoder;
 	struct drm_crtc *encoder_crtc;
 	struct drm_connector *connector;
@@ -264,7 +264,7 @@ mode_fixup(struct drm_atomic_state *state)
 	}
 
 	for (i = 0; i < state->num_connector; i++) {
-		struct drm_encoder_helper_funcs *funcs;
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 
 		conn_state = state->connector_states[i];
@@ -317,7 +317,7 @@ mode_fixup(struct drm_atomic_state *state)
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc_state = state->crtc_states[i];
@@ -346,7 +346,7 @@ needs_modeset(struct drm_crtc_state *state)
 }
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
  * @dev: DRM device
  * @state: the driver state object
 *
@@ -461,7 +461,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_planes - validate state object for planes changes
  * @dev: DRM device
  * @state: the driver state object
 *
@@ -481,7 +481,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	int i, ret = 0;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 
@@ -504,7 +504,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = state->crtcs[i];
 
 		if (!crtc)
@@ -571,9 +571,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 	int i;
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector_state *old_conn_state;
 		struct drm_connector *connector;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 		struct drm_crtc_state *old_crtc_state;
 
@@ -587,7 +587,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
 
-		if (!old_crtc_state->active)
+		if (!old_crtc_state->active ||
+		    !needs_modeset(old_conn_state->crtc->state))
 			continue;
 
 		encoder = old_conn_state->best_encoder;
@@ -605,7 +606,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call disable hooks twice.
+		 * it away), so we won't call disable hooks twice.
 		 */
 		if (encoder->bridge)
 			encoder->bridge->funcs->disable(encoder->bridge);
@@ -623,7 +624,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 		struct drm_crtc_state *old_crtc_state;
 
@@ -713,7 +714,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc = old_state->crtcs[i];
@@ -732,9 +733,9 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 	}
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector *connector;
 		struct drm_crtc_state *new_crtc_state;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 		struct drm_display_mode *mode, *adjusted_mode;
 
@@ -757,7 +758,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call mode_set hooks twice.
+		 * it away), so we won't call mode_set hooks twice.
 		 */
 		if (funcs->mode_set)
 			funcs->mode_set(encoder, mode, adjusted_mode);
@@ -812,7 +813,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc = old_state->crtcs[i];
@@ -838,8 +839,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	}
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector *connector;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 
 		connector = old_state->connectors[i];
@@ -847,7 +848,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		if (!connector || !connector->state->best_encoder)
 			continue;
 
-		if (!connector->state->crtc->state->active)
+		if (!connector->state->crtc->state->active ||
+		    !needs_modeset(connector->state->crtc->state))
 			continue;
 
 		encoder = connector->state->best_encoder;
@@ -858,7 +860,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call enable hooks twice.
+		 * it away), so we won't call enable hooks twice.
 		 */
 		if (encoder->bridge)
 			encoder->bridge->funcs->pre_enable(encoder->bridge);
@@ -1025,7 +1027,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
 	/*
 	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
+	 * any modeset locks at all under one condition: It must be guaranteed
 	 * that the asynchronous work has either been cancelled (if the driver
 	 * supports it, which at least requires that the framebuffers get
 	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
@@ -1114,7 +1116,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 	int ret, i;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 		struct drm_framebuffer *fb;
@@ -1137,7 +1139,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 fail:
 	for (i--; i >= 0; i--) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 		struct drm_framebuffer *fb;
@@ -1179,7 +1181,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = old_state->crtcs[i];
 
 		if (!crtc)
@@ -1194,7 +1196,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = old_state->planes[i];
 		struct drm_plane_state *old_plane_state;
 
@@ -1219,7 +1221,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = old_state->crtcs[i];
 
 		if (!crtc)
@@ -1254,7 +1256,7 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = old_state->planes[i];
 		struct drm_plane_state *plane_state = old_state->plane_states[i];
 		struct drm_framebuffer *old_fb;
@@ -2001,10 +2003,10 @@ retry:
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
 	list_for_each_entry(tmp_connector, &config->connector_list, head) {
-		if (connector->state->crtc != crtc)
+		if (tmp_connector->state->crtc != crtc)
 			continue;
 
-		if (connector->dpms == DRM_MODE_DPMS_ON) {
+		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
 			active = true;
 			break;
 		}
@@ -2067,6 +2069,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
 
 /**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+					      struct drm_crtc_state *state)
+{
+	memcpy(state, crtc->state, sizeof(*state));
+
+	state->mode_changed = false;
+	state->active_changed = false;
+	state->planes_changed = false;
+	state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
  * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
  * @crtc: drm CRTC
  *
@@ -2081,20 +2103,35 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
 	if (WARN_ON(!crtc->state))
 		return NULL;
 
-	state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
-	if (state) {
-		state->mode_changed = false;
-		state->active_changed = false;
-		state->planes_changed = false;
-		state->event = NULL;
-	}
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_crtc_duplicate_state(crtc, state);
 
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 
 /**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					    struct drm_crtc_state *state)
+{
+	/*
+	 * This is currently a placeholder so that drivers that subclass the
+	 * state will automatically do the right thing if code is ever added
+	 * to this function.
+	 */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
 * drm_atomic_helper_crtc_destroy_state - default state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
@@ -2105,6 +2142,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					  struct drm_crtc_state *state)
 {
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2130,6 +2168,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
 /**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+					       struct drm_plane_state *state)
+{
+	memcpy(state, plane->state, sizeof(*state));
+
+	if (state->fb)
+		drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
  * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
  * @plane: drm plane
  *
@@ -2144,16 +2200,32 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
 	if (WARN_ON(!plane->state))
 		return NULL;
 
-	state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
-	if (state && state->fb)
-		drm_framebuffer_reference(state->fb);
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_plane_duplicate_state(plane, state);
 
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 
 /**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					     struct drm_plane_state *state)
+{
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
  * drm_atomic_helper_plane_destroy_state - default state destroy hook
  * @plane: drm plane
  * @state: plane state object to release
@@ -2164,9 +2236,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					   struct drm_plane_state *state)
 {
-	if (state->fb)
-		drm_framebuffer_unreference(state->fb);
-
+	__drm_atomic_helper_plane_destroy_state(plane, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2190,6 +2260,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 
 /**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+					      struct drm_connector_state *state)
+{
+	memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
  * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
  * @connector: drm connector
  *
@@ -2199,14 +2285,41 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 struct drm_connector_state *
 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 {
+	struct drm_connector_state *state;
+
 	if (WARN_ON(!connector->state))
 		return NULL;
 
-	return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_connector_duplicate_state(connector, state);
+
+	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
 /**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					    struct drm_connector_state *state)
+{
+	/*
+	 * This is currently a placeholder so that drivers that subclass the
+	 * state will automatically do the right thing if code is ever added
+	 * to this function.
+	 */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
  * drm_atomic_helper_connector_destroy_state - default state destroy hook
  * @connector: drm connector
  * @state: connector state object to release
@@ -2217,6 +2330,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state)
 {
+	__drm_atomic_helper_connector_destroy_state(connector, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
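
The point of the new __drm_atomic_helper_*_duplicate_state()/__drm_atomic_helper_*_destroy_state() pairs introduced above is to let a driver embed drm_crtc_state (or plane/connector state) inside a larger private struct while still reusing the common copy/cleanup logic. A sketch for the CRTC case; "foo" and its extra field are invented for illustration and are not part of this patch:

struct foo_crtc_state {
	struct drm_crtc_state base;	/* embedded common state */
	u32 dither_mode;		/* invented driver-private field */
};

#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *cur = to_foo_crtc_state(crtc->state);
	struct foo_crtc_state *dup = kmalloc(sizeof(*dup), GFP_KERNEL);

	if (!dup)
		return NULL;

	/* copies the base state and clears mode_changed/event/etc. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &dup->base);
	dup->dither_mode = cur->dither_mode;

	return &dup->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(crtc, state);
	kfree(to_foo_crtc_state(state));
}
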
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -396,9 +399,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			encoder->bridge->funcs->enable(encoder->bridge);
 	}
 
-	/* Store real post-adjustment hardware mode. */
-	crtc->hwmode = *adjusted_mode;
-
 	/* Calculate and store various constants which
 	 * are later needed by vblank and swap-completion
 	 * timestamping. They are derived from true hwmode.
@@ -411,6 +411,7 @@ done:
 	if (!ret) {
 		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
+		crtc->hwmode = saved_hwmode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
 	}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index f128387..71dcbc6 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -427,11 +427,13 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
  * retrying the transaction as appropriate. It is assumed that the
  * aux->transfer function does not modify anything in the msg other than the
  * reply field.
+ *
+ * Returns bytes transferred on success, or a negative error code on failure.
  */
 static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
 	unsigned int retry;
-	int err;
+	int ret;
 
 	/*
 	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
@@ -440,14 +442,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	 */
 	for (retry = 0; retry < 7; retry++) {
 		mutex_lock(&aux->hw_mutex);
-		err = aux->transfer(aux, msg);
+		ret = aux->transfer(aux, msg);
 		mutex_unlock(&aux->hw_mutex);
-		if (err < 0) {
-			if (err == -EBUSY)
+		if (ret < 0) {
+			if (ret == -EBUSY)
 				continue;
 
-			DRM_DEBUG_KMS("transaction failed: %d\n", err);
-			return err;
+			DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+			return ret;
 		}
 
@@ -460,7 +462,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			break;
 
 		case DP_AUX_NATIVE_REPLY_NACK:
-			DRM_DEBUG_KMS("native nack\n");
+			DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
 			return -EREMOTEIO;
 
 		case DP_AUX_NATIVE_REPLY_DEFER:
@@ -488,12 +490,10 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			 * Both native ACK and I2C ACK replies received. We
 			 * can assume the transfer was successful.
 			 */
-			if (err < msg->size)
-				return -EPROTO;
-			return 0;
+			return ret;
 
 		case DP_AUX_I2C_REPLY_NACK:
-			DRM_DEBUG_KMS("I2C nack\n");
+			DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", ret, msg->size);
 			aux->i2c_nack_count++;
 			return -EREMOTEIO;
 
@@ -513,14 +513,55 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return -EREMOTEIO;
 }
 
+/*
+ * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
+ *
+ * Returns an error code on failure, or a recommended transfer size on success.
+ */
+static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
+{
+	int err, ret = orig_msg->size;
+	struct drm_dp_aux_msg msg = *orig_msg;
+
+	while (msg.size > 0) {
+		err = drm_dp_i2c_do_msg(aux, &msg);
+		if (err <= 0)
+			return err == 0 ? -EPROTO : err;
+
+		if (err < msg.size && err < ret) {
+			DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
+				      msg.size, err);
+			ret = err;
+		}
+
+		msg.size -= err;
+		msg.buffer += err;
+	}
+
+	return ret;
+}
+
+/*
+ * Bizlink-designed DP->DVI-D Dual Link adapters require the I2C over AUX
+ * packets to be as large as possible. If not, the I2C transactions never
+ * succeed. Hence the default is maximum.
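+ *
+ * With the default, for example, a 128-byte EDID block read goes out as
+ * eight 16-byte AUX transactions rather than 128 single-byte ones. Writing
+ * 1 to /sys/module/drm_kms_helper/parameters/dp_aux_i2c_transfer_size
+ * restores the old byte-by-byte behaviour for misbehaving sinks (the
+ * module name is an assumption based on where this helper is usually
+ * built).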
+ */ +static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES; +module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644); +MODULE_PARM_DESC(dp_aux_i2c_transfer_size, + "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)"); + static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct drm_dp_aux *aux = adapter->algo_data; unsigned int i, j; + unsigned transfer_size; struct drm_dp_aux_msg msg; int err = 0; + dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES); + memset(&msg, 0, sizeof(msg)); for (i = 0; i < num; i++) { @@ -538,20 +579,19 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, err = drm_dp_i2c_do_msg(aux, &msg); if (err < 0) break; - /* - * Many hardware implementations support FIFOs larger than a - * single byte, but it has been empirically determined that - * transferring data in larger chunks can actually lead to - * decreased performance. Therefore each message is simply - * transferred byte-by-byte. + /* We want each transaction to be as large as possible, but + * we'll go to smaller sizes if the hardware gives us a + * short reply. */ - for (j = 0; j < msgs[i].len; j++) { + transfer_size = dp_aux_i2c_transfer_size; + for (j = 0; j < msgs[i].len; j += msg.size) { msg.buffer = msgs[i].buf + j; - msg.size = 1; + msg.size = min(transfer_size, msgs[i].len - j); - err = drm_dp_i2c_do_msg(aux, &msg); + err = drm_dp_i2c_drain_msg(aux, &msg); if (err < 0) break; + transfer_size = err; } if (err < 0) break; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 379ab45..132581c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2324,6 +2324,19 @@ out: } EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + int slots = 0; + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return slots; + + slots = port->vcpi.num_slots; + drm_dp_put_port(port); + return slots; +} +EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); + /** * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI * @mgr: manager for this port diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index d512134..48f7359 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -70,7 +70,7 @@ void drm_err(const char *format, ...) 
vaf.fmt = format; vaf.va = &args; - printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV", + printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV", __builtin_return_address(0), &vaf); va_end(args); diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 732cb6f..4c0aa97 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); return ret; diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index cc0ae04..5c1aca4 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -304,7 +304,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, } drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); - drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); offset = fbi->var.xoffset * bytes_per_pixel; offset += fbi->var.yoffset * fb->pitches[0]; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1e6a0c7..309b947 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1034,23 +1034,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode *desired_mode; - int x, y; + struct drm_mode_set *mode_set; + int x, y, j; + /* in case of tile group, are we the last tile vert or horiz? + * If no tile group you are always the last one both vertically + * and horizontally + */ + bool lastv = true, lasth = true; + desired_mode = fb_helper->crtc_info[i].desired_mode; + mode_set = &fb_helper->crtc_info[i].mode_set; + + if (!desired_mode) + continue; + + crtc_count++; + x = fb_helper->crtc_info[i].x; y = fb_helper->crtc_info[i].y; - if (desired_mode) { - if (gamma_size == 0) - gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; - if (desired_mode->hdisplay + x < sizes.fb_width) - sizes.fb_width = desired_mode->hdisplay + x; - if (desired_mode->vdisplay + y < sizes.fb_height) - sizes.fb_height = desired_mode->vdisplay + y; - if (desired_mode->hdisplay + x > sizes.surface_width) - sizes.surface_width = desired_mode->hdisplay + x; - if (desired_mode->vdisplay + y > sizes.surface_height) - sizes.surface_height = desired_mode->vdisplay + y; - crtc_count++; + + if (gamma_size == 0) + gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; + + sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); + sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); + + for (j = 0; j < mode_set->num_connectors; j++) { + struct drm_connector *connector = mode_set->connectors[j]; + if (connector->has_tile) { + lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); + lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); + /* cloning to multiple tiles is just crazy-talk, so: */ + break; + } } + + if (lasth) + sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width); + if (lastv) + sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height); } if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { @@ -1261,12 +1283,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f int 
width, int height) { struct drm_cmdline_mode *cmdline_mode; - struct drm_display_mode *mode = NULL; + struct drm_display_mode *mode; bool prefer_non_interlace; cmdline_mode = &fb_helper_conn->connector->cmdline_mode; if (cmdline_mode->specified == false) - return mode; + return NULL; /* attempt to find a matching mode in the list of modes * we have gotten so far, if not add a CVT mode that conforms @@ -1275,7 +1297,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f goto create_mode; prefer_non_interlace = !cmdline_mode->interlace; - again: +again: list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { /* check width/height */ if (mode->hdisplay != cmdline_mode->xres || diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index f1b32f9..cbb4fc0 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -37,6 +37,7 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include "drm_internal.h" #include "drm_legacy.h" /** diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 2f4c4343..aa8bbb4 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, return 0; } -drm_ioctl_compat_t *drm_compat_ioctls[] = { +static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index a6d773a..266dcd6 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -524,8 +524,13 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return 0; } -#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} +#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { @@ -663,39 +668,29 @@ long drm_ioctl(struct file *filp, int retcode = -EINVAL; char stack_kdata[128]; char *kdata = NULL; - unsigned int usize, asize; + unsigned int usize, asize, drv_size; dev = file_priv->minor->dev; if (drm_device_is_unplugged(dev)) return -ENODEV; - if ((nr >= DRM_CORE_IOCTL_COUNT) && - ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) - goto err_i1; - if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && - (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { - u32 drv_size; + if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) { + /* driver ioctl */ + if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls) + goto err_i1; ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; - drv_size = _IOC_SIZE(ioctl->cmd_drv); - usize = asize = _IOC_SIZE(cmd); - if (drv_size > asize) - asize = drv_size; - cmd = ioctl->cmd_drv; - } - else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { - u32 drv_size; - + } else { + /* core ioctl */ + if (nr >= DRM_CORE_IOCTL_COUNT) + goto err_i1; ioctl = &drm_ioctls[nr]; + } - drv_size = _IOC_SIZE(ioctl->cmd); - usize = asize = _IOC_SIZE(cmd); - if (drv_size > asize) - asize = drv_size; - - cmd = ioctl->cmd; - } else - goto err_i1; + drv_size = _IOC_SIZE(ioctl->cmd); + usize = _IOC_SIZE(cmd); + asize = max(usize, drv_size); + cmd = ioctl->cmd; DRM_DEBUG("pid=%d, 
dev=0x%lx, auth=%d, %s\n", task_pid_nr(current), @@ -776,12 +771,13 @@ EXPORT_SYMBOL(drm_ioctl); */ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) { - if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) || - (nr < DRM_COMMAND_BASE)) { - *flags = drm_ioctls[nr].flags; - return true; - } + if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) + return false; + + if (nr >= DRM_CORE_IOCTL_COUNT) + return false; - return false; + *flags = drm_ioctls[nr].flags; + return true; } EXPORT_SYMBOL(drm_ioctl_flags); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 2cca85f..213b11e 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -903,6 +903,12 @@ EXPORT_SYMBOL(drm_mode_duplicate); */ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) { + if (!mode1 && !mode2) + return true; + + if (!mode1 || !mode2) + return false; + /* do clock check convert to PICOS so fb modes get matched * the same */ if (mode1->clock && mode2->clock) { @@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_sort); /** * drm_mode_connector_list_update - update the mode list for the connector * @connector: the connector to update - * @merge_type_bits: whether to merge or overright type bits. + * @merge_type_bits: whether to merge or overwrite type bits * * This moves the modes from the @connector probed_modes list * to the actual mode list. It compares the probed mode against the current diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index fd29f03..1b1bd42 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -27,6 +27,7 @@ #include <linux/dma-mapping.h> #include <linux/export.h> #include <drm/drmP.h> +#include "drm_internal.h" #include "drm_legacy.h" /** diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index b62b036..33807e0 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -353,13 +353,14 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev) if (primary == NULL) { DRM_DEBUG_KMS("Failed to allocate primary plane\n"); return NULL; - /* - * Remove the format_default field from drm_plane when dropping - * this helper. - */ - primary->format_default = true; } + /* + * Remove the format_default field from drm_plane when dropping + * this helper. 
+ */ + primary->format_default = true; + /* possible_crtc's will be filled in later by crtc_init */ ret = drm_universal_plane_init(dev, primary, 0, &drm_primary_helper_funcs, diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6591d48..3fee587 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; count = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); } else count = (*connector_funcs->get_modes)(connector); } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 4a2c328..aab49ee 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -41,6 +41,7 @@ #include <linux/slab.h> #endif #include <asm/pgtable.h> +#include "drm_internal.h" #include "drm_legacy.h" struct drm_vma_entry { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 84f8dfe..e71e331 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -76,6 +76,7 @@ static struct fb_ops exynos_drm_fb_ops = { }; static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes, struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; @@ -85,7 +86,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, unsigned long offset; drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); - drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); /* RGB formats use only one buffer */ buffer = exynos_drm_fb_buffer(fb, 0); @@ -189,7 +190,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, goto err_destroy_framebuffer; } - ret = exynos_drm_fbdev_update(helper, helper->fb); + ret = exynos_drm_fbdev_update(helper, sizes, helper->fb); if (ret < 0) goto err_dealloc_cmap; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index c300e22..33a10ce 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -147,6 +147,7 @@ struct fimd_win_data { unsigned int ovl_height; unsigned int fb_width; unsigned int fb_height; + unsigned int fb_pitch; unsigned int bpp; unsigned int pixel_format; dma_addr_t dma_addr; @@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc, win_data->offset_y = plane->crtc_y; win_data->ovl_width = plane->crtc_width; win_data->ovl_height = plane->crtc_height; + win_data->fb_pitch = plane->pitch; win_data->fb_width = plane->fb_width; win_data->fb_height = plane->fb_height; win_data->dma_addr = plane->dma_addr[0] + offset; win_data->bpp = plane->bpp; win_data->pixel_format = plane->pixel_format; - win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * - (plane->bpp >> 3); + win_data->buf_offsize = + plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); win_data->line_size = plane->crtc_width * (plane->bpp >> 3); DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", @@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos) writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ - size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); + size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3); val 
= (unsigned long)(win_data->dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 3518bc4..2e3bc57 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -55,6 +55,7 @@ struct hdmi_win_data { unsigned int fb_x; unsigned int fb_y; unsigned int fb_width; + unsigned int fb_pitch; unsigned int fb_height; unsigned int src_width; unsigned int src_height; @@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) } else { luma_addr[0] = win_data->dma_addr; chroma_addr[0] = win_data->dma_addr - + (win_data->fb_width * win_data->fb_height); + + (win_data->fb_pitch * win_data->fb_height); } if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { @@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; } else { - luma_addr[1] = luma_addr[0] + win_data->fb_width; - chroma_addr[1] = chroma_addr[0] + win_data->fb_width; + luma_addr[1] = luma_addr[0] + win_data->fb_pitch; + chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch; } } else { ctx->interlace = false; @@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); /* setting size of input image */ - vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | + vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) | VP_IMG_VSIZE(win_data->fb_height)); /* chroma height has to reduced by 2 to avoid chroma distorions */ - vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | + vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) | VP_IMG_VSIZE(win_data->fb_height / 2)); vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); @@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) /* converting dma address base and source offset */ dma_addr = win_data->dma_addr + (win_data->fb_x * win_data->bpp >> 3) - + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); + + (win_data->fb_y * win_data->fb_pitch); src_x_offset = 0; src_y_offset = 0; @@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); /* setup geometry */ - mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); + mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), + win_data->fb_pitch / (win_data->bpp >> 3)); /* setup display size */ if (ctx->mxr_ver == MXR_VER_128_0_0_184 && @@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc, win_data->fb_y = plane->fb_y; win_data->fb_width = plane->fb_width; win_data->fb_height = plane->fb_height; + win_data->fb_pitch = plane->pitch; win_data->src_width = plane->src_width; win_data->src_height = plane->src_height; diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c index 61aa824..b728523 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/i2c/adv7511.c @@ -27,12 +27,13 @@ struct adv7511 { struct regmap *regmap; struct regmap *packet_memory_regmap; enum drm_connector_status status; - int dpms_mode; + bool powered; unsigned int f_tmds; unsigned int current_edid_segment; uint8_t edid_buf[256]; + bool edid_read; wait_queue_head_t wq; struct drm_encoder *encoder; @@ -357,6 +358,48 @@ static void adv7511_set_link_config(struct adv7511 *adv7511, adv7511->rgb = config->input_colorspace == 
HDMI_COLORSPACE_RGB; } +static void adv7511_power_on(struct adv7511 *adv7511) +{ + adv7511->current_edid_segment = -1; + + regmap_write(adv7511->regmap, ADV7511_REG_INT(0), + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), + ADV7511_INT1_DDC_ERROR); + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, + ADV7511_POWER_POWER_DOWN, 0); + + /* + * Per spec it is allowed to pulse the HDP signal to indicate that the + * EDID information has changed. Some monitors do this when they wakeup + * from standby or are enabled. When the HDP goes low the adv7511 is + * reset and the outputs are disabled which might cause the monitor to + * go to standby again. To avoid this we ignore the HDP pin for the + * first few seconds after enabling the output. + */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HDP_SRC_MASK, + ADV7511_REG_POWER2_HDP_SRC_NONE); + + /* + * Most of the registers are reset during power down or when HPD is low. + */ + regcache_sync(adv7511->regmap); + + adv7511->powered = true; +} + +static void adv7511_power_off(struct adv7511 *adv7511) +{ + /* TODO: setup additional power down modes */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, + ADV7511_POWER_POWER_DOWN, + ADV7511_POWER_POWER_DOWN); + regcache_mark_dirty(adv7511->regmap); + + adv7511->powered = false; +} + /* ----------------------------------------------------------------------------- * Interrupt and hotplug detection */ @@ -379,69 +422,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511) return false; } -static irqreturn_t adv7511_irq_handler(int irq, void *devid) -{ - struct adv7511 *adv7511 = devid; - - if (adv7511_hpd(adv7511)) - drm_helper_hpd_irq_event(adv7511->encoder->dev); - - wake_up_all(&adv7511->wq); - - return IRQ_HANDLED; -} - -static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511, - unsigned int irq) +static int adv7511_irq_process(struct adv7511 *adv7511) { unsigned int irq0, irq1; - unsigned int pending; int ret; ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0); if (ret < 0) - return 0; + return ret; + ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1); if (ret < 0) - return 0; + return ret; - pending = (irq1 << 8) | irq0; + regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - return pending & irq; + if (irq0 & ADV7511_INT0_HDP) + drm_helper_hpd_irq_event(adv7511->encoder->dev); + + if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { + adv7511->edid_read = true; + + if (adv7511->i2c_main->irq) + wake_up_all(&adv7511->wq); + } + + return 0; } -static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq, - int timeout) +static irqreturn_t adv7511_irq_handler(int irq, void *devid) +{ + struct adv7511 *adv7511 = devid; + int ret; + + ret = adv7511_irq_process(adv7511); + return ret < 0 ? 
IRQ_NONE : IRQ_HANDLED; +} + +/* ----------------------------------------------------------------------------- + * EDID retrieval + */ + +static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout) { - unsigned int pending; int ret; if (adv7511->i2c_main->irq) { ret = wait_event_interruptible_timeout(adv7511->wq, - adv7511_is_interrupt_pending(adv7511, irq), - msecs_to_jiffies(timeout)); - if (ret <= 0) - return 0; - pending = adv7511_is_interrupt_pending(adv7511, irq); + adv7511->edid_read, msecs_to_jiffies(timeout)); } else { - if (timeout < 25) - timeout = 25; - do { - pending = adv7511_is_interrupt_pending(adv7511, irq); - if (pending) + for (; timeout > 0; timeout -= 25) { + ret = adv7511_irq_process(adv7511); + if (ret < 0) break; + + if (adv7511->edid_read) + break; + msleep(25); - timeout -= 25; - } while (timeout >= 25); + } } - return pending; + return adv7511->edid_read ? 0 : -EIO; } -/* ----------------------------------------------------------------------------- - * EDID retrieval - */ - static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { @@ -463,19 +508,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block, return ret; if (status != 2) { + adv7511->edid_read = false; regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT, block); - ret = adv7511_wait_for_interrupt(adv7511, - ADV7511_INT0_EDID_READY | - ADV7511_INT1_DDC_ERROR, 200); - - if (!(ret & ADV7511_INT0_EDID_READY)) - return -EIO; + ret = adv7511_wait_for_edid(adv7511, 200); + if (ret < 0) + return ret; } - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); - /* Break this apart, hopefully more I2C controllers will * support 64 byte transfers than 256 byte transfers */ @@ -526,9 +566,11 @@ static int adv7511_get_modes(struct drm_encoder *encoder, unsigned int count; /* Reading the EDID only works if the device is powered */ - if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) { + if (!adv7511->powered) { regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT(1), + ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); adv7511->current_edid_segment = -1; @@ -536,7 +578,7 @@ static int adv7511_get_modes(struct drm_encoder *encoder, edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511); - if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) + if (!adv7511->powered) regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); @@ -558,41 +600,10 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode) { struct adv7511 *adv7511 = encoder_to_adv7511(encoder); - switch (mode) { - case DRM_MODE_DPMS_ON: - adv7511->current_edid_segment = -1; - - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR); - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, 0); - /* - * Per spec it is allowed to pulse the HDP signal to indicate - * that the EDID information has changed. Some monitors do this - * when they wakeup from standby or are enabled. When the HDP - * goes low the adv7511 is reset and the outputs are disabled - * which might cause the monitor to go to standby again. To - * avoid this we ignore the HDP pin for the first few seconds - * after enabling the output. 
- */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HDP_SRC_MASK, - ADV7511_REG_POWER2_HDP_SRC_NONE); - /* Most of the registers are reset during power down or - * when HPD is low - */ - regcache_sync(adv7511->regmap); - break; - default: - /* TODO: setup additional power down modes */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, - ADV7511_POWER_POWER_DOWN); - regcache_mark_dirty(adv7511->regmap); - break; - } - - adv7511->dpms_mode = mode; + if (mode == DRM_MODE_DPMS_ON) + adv7511_power_on(adv7511); + else + adv7511_power_off(adv7511); } static enum drm_connector_status @@ -620,10 +631,9 @@ adv7511_encoder_detect(struct drm_encoder *encoder, * there is a pending HPD interrupt and the cable is connected there was * at least one transition from disconnected to connected and the chip * has to be reinitialized. */ - if (status == connector_status_connected && hpd && - adv7511->dpms_mode == DRM_MODE_DPMS_ON) { + if (status == connector_status_connected && hpd && adv7511->powered) { regcache_mark_dirty(adv7511->regmap); - adv7511_encoder_dpms(encoder, adv7511->dpms_mode); + adv7511_power_on(adv7511); adv7511_get_modes(encoder, connector); if (adv7511->status == connector_status_connected) status = connector_status_disconnected; @@ -858,7 +868,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (!adv7511) return -ENOMEM; - adv7511->dpms_mode = DRM_MODE_DPMS_OFF; + adv7511->powered = false; adv7511->status = connector_status_disconnected; ret = adv7511_parse_dt(dev->of_node, &link_config); @@ -918,10 +928,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); - - adv7511->current_edid_segment = -1; + adv7511_power_off(adv7511); i2c_set_clientdata(i2c, adv7511); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5081e6f..2394924 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4541,12 +4541,116 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, i915_cache_sharing_get, i915_cache_sharing_set, "%llu\n"); +struct sseu_dev_status { + unsigned int slice_total; + unsigned int subslice_total; + unsigned int subslice_per_slice; + unsigned int eu_total; + unsigned int eu_per_subslice; +}; + +static void cherryview_sseu_device_status(struct drm_device *dev, + struct sseu_dev_status *stat) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + const int ss_max = 2; + int ss; + u32 sig1[ss_max], sig2[ss_max]; + + sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); + sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); + sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); + sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); + + for (ss = 0; ss < ss_max; ss++) { + unsigned int eu_cnt; + + if (sig1[ss] & CHV_SS_PG_ENABLE) + /* skip disabled subslice */ + continue; + + stat->slice_total = 1; + stat->subslice_per_slice++; + eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + + ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 
0 : 2); + stat->eu_total += eu_cnt; + stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt); + } + stat->subslice_total = stat->subslice_per_slice; +} + +static void gen9_sseu_device_status(struct drm_device *dev, + struct sseu_dev_status *stat) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int s_max = 3, ss_max = 4; + int s, ss; + u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; + + /* BXT has a single slice and at most 3 subslices. */ + if (IS_BROXTON(dev)) { + s_max = 1; + ss_max = 3; + } + + for (s = 0; s < s_max; s++) { + s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); + eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); + eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); + } + + eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | + GEN9_PGCTL_SSA_EU19_ACK | + GEN9_PGCTL_SSA_EU210_ACK | + GEN9_PGCTL_SSA_EU311_ACK; + eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | + GEN9_PGCTL_SSB_EU19_ACK | + GEN9_PGCTL_SSB_EU210_ACK | + GEN9_PGCTL_SSB_EU311_ACK; + + for (s = 0; s < s_max; s++) { + unsigned int ss_cnt = 0; + + if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) + /* skip disabled slice */ + continue; + + stat->slice_total++; + + if (IS_SKYLAKE(dev)) + ss_cnt = INTEL_INFO(dev)->subslice_per_slice; + + for (ss = 0; ss < ss_max; ss++) { + unsigned int eu_cnt; + + if (IS_BROXTON(dev) && + !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + /* skip disabled subslice */ + continue; + + if (IS_BROXTON(dev)) + ss_cnt++; + + eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & + eu_mask[ss%2]); + stat->eu_total += eu_cnt; + stat->eu_per_subslice = max(stat->eu_per_subslice, + eu_cnt); + } + + stat->subslice_total += ss_cnt; + stat->subslice_per_slice = max(stat->subslice_per_slice, + ss_cnt); + } +} + static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0; + struct sseu_dev_status stat; if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) return -ENODEV; @@ -4570,79 +4674,22 @@ static int i915_sseu_status(struct seq_file *m, void *unused) yesno(INTEL_INFO(dev)->has_eu_pg)); seq_puts(m, "SSEU Device Status\n"); + memset(&stat, 0, sizeof(stat)); if (IS_CHERRYVIEW(dev)) { - const int ss_max = 2; - int ss; - u32 sig1[ss_max], sig2[ss_max]; - - sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); - sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); - sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); - sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); - - for (ss = 0; ss < ss_max; ss++) { - unsigned int eu_cnt; - - if (sig1[ss] & CHV_SS_PG_ENABLE) - /* skip disabled subslice */ - continue; - - s_tot = 1; - ss_per++; - eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + - ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + - ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + - ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 
0 : 2); - eu_tot += eu_cnt; - eu_per = max(eu_per, eu_cnt); - } - ss_tot = ss_per; - } else if (IS_SKYLAKE(dev)) { - const int s_max = 3, ss_max = 4; - int s, ss; - u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; - - s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK); - s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK); - s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK); - eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK); - eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK); - eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK); - eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK); - eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK); - eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK); - eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | - GEN9_PGCTL_SSA_EU19_ACK | - GEN9_PGCTL_SSA_EU210_ACK | - GEN9_PGCTL_SSA_EU311_ACK; - eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | - GEN9_PGCTL_SSB_EU19_ACK | - GEN9_PGCTL_SSB_EU210_ACK | - GEN9_PGCTL_SSB_EU311_ACK; - - for (s = 0; s < s_max; s++) { - if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) - /* skip disabled slice */ - continue; - - s_tot++; - ss_per = INTEL_INFO(dev)->subslice_per_slice; - ss_tot += ss_per; - for (ss = 0; ss < ss_max; ss++) { - unsigned int eu_cnt; - - eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & - eu_mask[ss%2]); - eu_tot += eu_cnt; - eu_per = max(eu_per, eu_cnt); - } - } + cherryview_sseu_device_status(dev, &stat); + } else if (INTEL_INFO(dev)->gen >= 9) { + gen9_sseu_device_status(dev, &stat); } - seq_printf(m, " Enabled Slice Total: %u\n", s_tot); - seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot); - seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per); - seq_printf(m, " Enabled EU Total: %u\n", eu_tot); - seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per); + seq_printf(m, " Enabled Slice Total: %u\n", + stat.slice_total); + seq_printf(m, " Enabled Subslice Total: %u\n", + stat.subslice_total); + seq_printf(m, " Enabled Subslice Per Slice: %u\n", + stat.subslice_per_slice); + seq_printf(m, " Enabled EU Total: %u\n", + stat.eu_total); + seq_printf(m, " Enabled EU Per Subslice: %u\n", + stat.eu_per_subslice); return 0; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d67cd9e..e44116f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -564,6 +564,140 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) #undef SEP_COMMA } +static void cherryview_sseu_info_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + u32 fuse, eu_dis; + + info = (struct intel_device_info *)&dev_priv->info; + fuse = I915_READ(CHV_FUSE_GT); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? + info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. 
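+ * For example, a fully enabled part (two subslices of 8 EU each) ends up
+ * with both has_subslice_pg and has_eu_pg set, while a part fused down to
+ * a single subslice loses subslice power gating since subslice_total == 1.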
+ */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + +static void gen9_sseu_info_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + int s_max = 3, ss_max = 4, eu_max = 8; + int s, ss; + u32 fuse2, s_enable, ss_disable, eu_disable; + u8 eu_mask = 0xff; + + /* + * BXT has a single slice. BXT also has at most 6 EU per subslice, + * and therefore only the lowest 6 bits of the 8-bit EU disable + * fields are valid. + */ + if (IS_BROXTON(dev)) { + s_max = 1; + eu_max = 6; + eu_mask = 0x3f; + } + + info = (struct intel_device_info *)&dev_priv->info; + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> + GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> + GEN9_F2_SS_DIS_SHIFT; + + info->slice_total = hweight32(s_enable); + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * + info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + for (ss = 0; ss < ss_max; ss++) { + int eu_per_ss; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & + eu_mask); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_per_ss == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_per_ss; + } + } + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. BXT is expected to be perfectly uniform in EU + * distribution. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, + info->subslice_total) : 0; + /* + * SKL supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. BXT supports subslice + * power gating on devices with more than one subslice, and + * supports EU power gating on devices with more than one EU + * pair per subslice. + */ + info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); + info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + /* * Determine various intel_device_info fields at runtime. 
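 * For example, with GEN8_FUSE2 reporting s_enable = 0x1 and
 * ss_disable = 0xc, gen9_sseu_info_init() above yields slice_total = 1 and
 * subslice_per_slice = 4 - hweight32(0xc) = 2.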
* @@ -585,7 +719,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info = (struct intel_device_info *)&dev_priv->info; - if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9) + if (IS_BROXTON(dev)) { + info->num_sprites[PIPE_A] = 3; + info->num_sprites[PIPE_B] = 3; + info->num_sprites[PIPE_C] = 2; + } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9) for_each_pipe(dev_priv, pipe) info->num_sprites[pipe] = 2; else @@ -620,116 +758,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev) } /* Initialize slice/subslice/EU info */ - if (IS_CHERRYVIEW(dev)) { - u32 fuse, eu_dis; - - fuse = I915_READ(CHV_FUSE_GT); - - info->slice_total = 1; - - if (!(fuse & CHV_FGT_DISABLE_SS0)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | - CHV_FGT_EU_DIS_SS0_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - if (!(fuse & CHV_FGT_DISABLE_SS1)) { - info->subslice_per_slice++; - eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | - CHV_FGT_EU_DIS_SS1_R1_MASK); - info->eu_total += 8 - hweight32(eu_dis); - } - - info->subslice_total = info->subslice_per_slice; - /* - * CHV expected to always have a uniform distribution of EU - * across subslices. - */ - info->eu_per_subslice = info->subslice_total ? - info->eu_total / info->subslice_total : - 0; - /* - * CHV supports subslice power gating on devices with more than - * one subslice, and supports EU power gating on devices with - * more than one EU pair per subslice. - */ - info->has_slice_pg = 0; - info->has_subslice_pg = (info->subslice_total > 1); - info->has_eu_pg = (info->eu_per_subslice > 2); - } else if (IS_SKYLAKE(dev)) { - const int s_max = 3, ss_max = 4, eu_max = 8; - int s, ss; - u32 fuse2, eu_disable[s_max], s_enable, ss_disable; - - fuse2 = I915_READ(GEN8_FUSE2); - s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> - GEN8_F2_S_ENA_SHIFT; - ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> - GEN9_F2_SS_DIS_SHIFT; - - eu_disable[0] = I915_READ(GEN8_EU_DISABLE0); - eu_disable[1] = I915_READ(GEN8_EU_DISABLE1); - eu_disable[2] = I915_READ(GEN8_EU_DISABLE2); - - info->slice_total = hweight32(s_enable); - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - info->subslice_per_slice = ss_max - hweight32(ss_disable); - info->subslice_total = info->slice_total * - info->subslice_per_slice; - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < s_max; s++) { - if (!(s_enable & (0x1 << s))) - /* skip disabled slice */ - continue; - - for (ss = 0; ss < ss_max; ss++) { - u32 n_disabled; - - if (ss_disable & (0x1 << ss)) - /* skip disabled subslice */ - continue; + if (IS_CHERRYVIEW(dev)) + cherryview_sseu_info_init(dev); + else if (INTEL_INFO(dev)->gen >= 9) + gen9_sseu_info_init(dev); - n_disabled = hweight8(eu_disable[s] >> - (ss * eu_max)); - - /* - * Record which subslice(s) has(have) 7 EUs. we - * can tune the hash used to spread work among - * subslices if they are unbalanced. - */ - if (eu_max - n_disabled == 7) - info->subslice_7eu[s] |= 1 << ss; - - info->eu_total += eu_max - n_disabled; - } - } - - /* - * SKL is expected to always have a uniform distribution - * of EU across subslices with the exception that any one - * EU in any one subslice may be fused off for die - * recovery. - */ - info->eu_per_subslice = info->subslice_total ? 
- DIV_ROUND_UP(info->eu_total, - info->subslice_total) : 0; - /* - * SKL supports slice power gating on devices with more than - * one slice, and supports EU power gating on devices with - * more than one EU pair per subslice. - */ - info->has_slice_pg = (info->slice_total > 1) ? 1 : 0; - info->has_subslice_pg = 0; - info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0; - } DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 182d6a7..c1a3cdb5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -381,6 +381,18 @@ static const struct intel_device_info intel_skylake_gt3_info = { IVB_CURSOR_OFFSETS, }; +static const struct intel_device_info intel_broxton_info = { + .is_preliminary = 1, + .gen = 9, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .num_pipes = 3, + .has_ddi = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, +}; + /* * Make sure any device matches here are from most specific to most * general. For example, since the Quanta match is based on the subsystem @@ -420,7 +432,8 @@ static const struct intel_device_info intel_skylake_gt3_info = { INTEL_CHV_IDS(&intel_cherryview_info), \ INTEL_SKL_GT1_IDS(&intel_skylake_info), \ INTEL_SKL_GT2_IDS(&intel_skylake_info), \ - INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \ + INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ + INTEL_BXT_IDS(&intel_broxton_info) static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_PCI_IDS, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 745dcd8..3b21249 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -130,7 +130,7 @@ enum transcoder { * * This value doesn't count the cursor plane. 
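 * (Broxton exposes a primary plane plus three sprites on pipes A and B,
 * hence the bump to 4 below.)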
*/ -#define I915_MAX_PLANES 3 +#define I915_MAX_PLANES 4 enum plane { PLANE_A = 0, @@ -2319,6 +2319,7 @@ struct drm_i915_cmd_table { #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) +#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) @@ -2343,6 +2344,10 @@ struct drm_i915_cmd_table { #define SKL_REVID_D0 (0x3) #define SKL_REVID_E0 (0x4) +#define BXT_REVID_A0 (0x0) +#define BXT_REVID_B0 (0x3) +#define BXT_REVID_C0 (0x6) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0b67914..9041f3d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1867,7 +1867,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 8) { - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); @@ -2433,7 +2433,17 @@ static int ggtt_probe_common(struct drm_device *dev, gtt_phys_addr = pci_resource_start(dev->pdev, 0) + (pci_resource_len(dev->pdev, 0) / 2); - dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); + /* + * On BXT writes larger than 64 bit to the GTT pagetable range will be + * dropped. For WC mappings in general we have 64 byte burst writes + * when the WC buffer is flushed, so we can't use it, but have to + * resort to an uncached mapping. The WC issue is easily caught by the + * readback check when writing GTT PTE entries. 
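+ * Put differently, of a 64-byte WC burst covering eight 8-byte PTEs, only
+ * the first 64-bit write is guaranteed to land.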
+ */ + if (IS_BROXTON(dev)) + dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size); + else + dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); if (!dev_priv->gtt.gsm) { DRM_ERROR("Failed to map the gtt page table\n"); return -ENOMEM; @@ -2555,7 +2565,7 @@ static int gen8_gmch_probe(struct drm_device *dev, *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index f8da716..348ed5a 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -209,7 +209,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp) dev_priv->fbc.threshold = ret; - if (HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev_priv)->gen >= 5) I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da5c86a..87d9c0f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1148,6 +1148,7 @@ enum skl_disp_power_wells { /* control register for cpu gtt access */ #define TILECTL 0x101000 #define TILECTL_SWZCTL (1 << 0) +#define TILECTL_TLBPF (1 << 1) #define TILECTL_TLB_PREFETCH_DIS (1 << 2) #define TILECTL_BACKSNOOP_DIS (1 << 3) @@ -1552,9 +1553,7 @@ enum skl_disp_power_wells { #define GEN9_F2_SS_DIS_SHIFT 20 #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) -#define GEN8_EU_DISABLE0 0x9134 -#define GEN8_EU_DISABLE1 0x9138 -#define GEN8_EU_DISABLE2 0x913c +#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4) #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) @@ -5347,9 +5346,11 @@ enum skl_disp_power_wells { #define GEN8_PIPE_VSYNC (1 << 1) #define GEN8_PIPE_VBLANK (1 << 0) #define GEN9_PIPE_CURSOR_FAULT (1 << 11) +#define GEN9_PIPE_PLANE4_FAULT (1 << 10) #define GEN9_PIPE_PLANE3_FAULT (1 << 9) #define GEN9_PIPE_PLANE2_FAULT (1 << 8) #define GEN9_PIPE_PLANE1_FAULT (1 << 7) +#define GEN9_PIPE_PLANE4_FLIP_DONE (1 << 6) #define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5) #define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4) #define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3) @@ -5360,6 +5361,7 @@ enum skl_disp_power_wells { GEN8_PIPE_PRIMARY_FAULT) #define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \ (GEN9_PIPE_CURSOR_FAULT | \ + GEN9_PIPE_PLANE4_FAULT | \ GEN9_PIPE_PLANE3_FAULT | \ GEN9_PIPE_PLANE2_FAULT | \ GEN9_PIPE_PLANE1_FAULT) @@ -5477,6 +5479,10 @@ enum skl_disp_power_wells { #define HDC_FORCE_NON_COHERENT (1<<4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) +/* GEN9 chicken */ +#define SLICE_ECO_CHICKEN0 0x7308 +#define PIXEL_MASK_CAMMING_DISABLE (1 << 14) + /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) @@ -6236,6 +6242,7 @@ enum skl_disp_power_wells { #define GEN8_UCGCTL6 0x9430 #define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24) #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) +#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28) #define GEN6_GFXPAUSE 0xA000 #define GEN6_RPNSWREQ 0xA008 @@ -6403,17 +6410,12 @@ enum skl_disp_power_wells { #define CHV_POWER_SS1_SIG2 0xa72c #define CHV_EU311_PG_ENABLE (1<<1) -#define GEN9_SLICE0_PGCTL_ACK 0x804c -#define GEN9_SLICE1_PGCTL_ACK 0x8050 -#define GEN9_SLICE2_PGCTL_ACK 0x8054 
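+/* The parameterized forms reproduce the old per-slice constants, e.g.
+ * GEN9_SLICE_PGCTL_ACK(2) = 0x804c + 2*4 = 0x8054 and
+ * GEN9_SS23_EU_PGCTL_ACK(2) = 0x8060 + 2*8 = 0x8070.
+ */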
+#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4) #define GEN9_PGCTL_SLICE_ACK (1 << 0) +#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2)) -#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c -#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060 -#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064 -#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068 -#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c -#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070 +#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8) +#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8) #define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) #define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) #define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 486f6fa..5b50484 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -322,7 +322,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, uint32_t reg = DDI_BUF_CTL(port); int i; - for (i = 0; i < 8; i++) { + for (i = 0; i < 16; i++) { udelay(1); if (I915_READ(reg) & DDI_BUF_IS_IDLE) return; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 97922fb..9f72d9a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13505,7 +13505,16 @@ static void intel_setup_outputs(struct drm_device *dev) if (intel_crt_present(dev)) intel_crt_init(dev); - if (HAS_DDI(dev)) { + if (IS_BROXTON(dev)) { + /* + * FIXME: Broxton doesn't support port detection via the + * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to + * detect the ports. + */ + intel_ddi_init(dev, PORT_A); + intel_ddi_init(dev, PORT_B); + intel_ddi_init(dev, PORT_C); + } else if (HAS_DDI(dev)) { int found; /* diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0fc35dd..a798d75 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1207,6 +1207,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, { struct intel_engine_cs *ring = ringbuf->ring; u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + bool vf_flush_wa; u32 flags = 0; int ret; @@ -1228,10 +1229,26 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; } - ret = intel_logical_ring_begin(ringbuf, ctx, 6); + /* + * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe + * control. + */ + vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && + flags & PIPE_CONTROL_VF_CACHE_INVALIDATE; + + ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 
12 : 6); if (ret) return ret; + if (vf_flush_wa) { + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + } + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); intel_logical_ring_emit(ringbuf, flags); intel_logical_ring_emit(ringbuf, scratch_addr); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fc7e0c7..a80bfd5 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -98,6 +98,26 @@ static void skl_init_clock_gating(struct drm_device *dev) GEN8_LQSC_RO_PERF_DIS); } +static void bxt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen9_init_clock_gating(dev); + + /* + * FIXME: + * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only. + * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. + */ + /* WaDisableSDEUnitClockGating:bxt */ + I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + GEN8_SDEUNIT_CLOCK_GATE_DISABLE | + GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); + + /* FIXME: apply on A0 only */ + I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); +} + static void i915_pineview_get_mem_freq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2542,6 +2562,7 @@ static bool ilk_disable_lp_wm(struct drm_device *dev) */ #define SKL_DDB_SIZE 896 /* in blocks */ +#define BXT_DDB_SIZE 512 static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, @@ -2560,7 +2581,10 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, return; } - ddb_size = SKL_DDB_SIZE; + if (IS_BROXTON(dev)) + ddb_size = BXT_DDB_SIZE; + else + ddb_size = SKL_DDB_SIZE; ddb_size -= 4; /* 4 blocks for bypass path allocation */ @@ -6570,7 +6594,12 @@ void intel_init_pm(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); - dev_priv->display.init_clock_gating = skl_init_clock_gating; + if (IS_BROXTON(dev)) + dev_priv->display.init_clock_gating = + bxt_init_clock_gating; + else if (IS_SKYLAKE(dev)) + dev_priv->display.init_clock_gating = + skl_init_clock_gating; dev_priv->display.update_wm = skl_update_wm; dev_priv->display.update_sprite_wm = skl_update_sprite_wm; } else if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9f3fc81..de8c074 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -969,6 +969,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_CCS_TLB_PREFETCH_ENABLE); + /* + * FIXME: don't apply the following on BXT for stepping C. On BXT A0 + * the flag reads back as 0. 
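+ * (SLICE_ECO_CHICKEN0 is written via WA_SET_BIT_MASKED() below, so the
+ * value that hits the register is ((1 << 14) << 16) | (1 << 14) =
+ * 0x40004000, assuming the usual i915 masked-register encoding where the
+ * upper half carries the write-enable mask.)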
+ */ + /* WaDisableMaskBasedCammingInRCC:sklC,bxtA */ + if (INTEL_REVID(dev) == SKL_REVID_C0 || IS_BROXTON(dev)) + WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, + PIXEL_MASK_CAMMING_DISABLE); + return 0; } @@ -1030,6 +1039,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) return skl_tune_iz_hashing(ring); } +static int bxt_init_workarounds(struct intel_engine_cs *ring) +{ + gen9_init_workarounds(ring); + + return 0; +} + int init_workarounds_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; @@ -1047,8 +1063,9 @@ int init_workarounds_ring(struct intel_engine_cs *ring) if (IS_SKYLAKE(dev)) return skl_init_workarounds(ring); - else if (IS_GEN9(dev)) - return gen9_init_workarounds(ring); + + if (IS_BROXTON(dev)) + return bxt_init_workarounds(ring); return 0; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0cb3767..e3d41c0 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1144,7 +1144,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index bacbbb7..0a6f676 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING Compile in support for logging register reads/writes in a format that can be parsed by envytools demsm tool. If enabled, register logging can be switched on via msm.reglog=y module param. + +config DRM_MSM_DSI + bool "Enable DSI support in MSM DRM driver" + depends on DRM_MSM + select DRM_PANEL + select DRM_MIPI_DSI + default y + help + Choose this option if you have a need for MIPI DSI connector + support. + diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 674a132..ab20867 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -50,5 +50,10 @@ msm-y := \ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o +msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ + dsi/dsi_host.o \ + dsi/dsi_manager.o \ + dsi/dsi_phy.o \ + mdp/mdp5/mdp5_cmd_encoder.o obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c new file mode 100644 index 0000000..28d1f95 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi.h" + +struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) +{ + if (!msm_dsi || !msm_dsi->panel) + return NULL; + + return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ? 
+ msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : + msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; +} + +static void dsi_destroy(struct msm_dsi *msm_dsi) +{ + if (!msm_dsi) + return; + + msm_dsi_manager_unregister(msm_dsi); + if (msm_dsi->host) { + msm_dsi_host_destroy(msm_dsi->host); + msm_dsi->host = NULL; + } + + platform_set_drvdata(msm_dsi->pdev, NULL); +} + +static struct msm_dsi *dsi_init(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi = NULL; + int ret; + + if (!pdev) { + pr_err("%s: no dsi device\n", __func__); + ret = -ENXIO; + goto fail; + } + + msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); + if (!msm_dsi) { + ret = -ENOMEM; + goto fail; + } + DBG("dsi probed=%p", msm_dsi); + + msm_dsi->pdev = pdev; + platform_set_drvdata(pdev, msm_dsi); + + /* Init dsi host */ + ret = msm_dsi_host_init(msm_dsi); + if (ret) + goto fail; + + /* Register to dsi manager */ + ret = msm_dsi_manager_register(msm_dsi); + if (ret) + goto fail; + + return msm_dsi; + +fail: + if (msm_dsi) + dsi_destroy(msm_dsi); + + return ERR_PTR(ret); +} + +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + struct platform_device *pdev = to_platform_device(dev); + struct msm_dsi *msm_dsi; + + DBG(""); + msm_dsi = dsi_init(pdev); + if (IS_ERR(msm_dsi)) + return PTR_ERR(msm_dsi); + + priv->dsi[msm_dsi->id] = msm_dsi; + + return 0; +} + +static void dsi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + int id = msm_dsi->id; + + if (priv->dsi[id]) { + dsi_destroy(msm_dsi); + priv->dsi[id] = NULL; + } +} + +static const struct component_ops dsi_ops = { + .bind = dsi_bind, + .unbind = dsi_unbind, +}; + +static int dsi_dev_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dsi_ops); +} + +static int dsi_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_del(&pdev->dev, &dsi_ops); + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "qcom,mdss-dsi-ctrl" }, + {} +}; + +static struct platform_driver dsi_driver = { + .probe = dsi_dev_probe, + .remove = dsi_dev_remove, + .driver = { + .name = "msm_dsi", + .of_match_table = dt_match, + }, +}; + +void __init msm_dsi_register(void) +{ + DBG(""); + platform_driver_register(&dsi_driver); +} + +void __exit msm_dsi_unregister(void) +{ + DBG(""); + platform_driver_unregister(&dsi_driver); +} + +int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, + struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) +{ + struct msm_drm_private *priv = dev->dev_private; + int ret, i; + + if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || + !encoders[MSM_DSI_CMD_ENCODER_ID])) + return -EINVAL; + + msm_dsi->dev = dev; + + ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); + if (ret) { + dev_err(dev->dev, "failed to modeset init host: %d\n", ret); + goto fail; + } + + msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); + if (IS_ERR(msm_dsi->bridge)) { + ret = PTR_ERR(msm_dsi->bridge); + dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret); + msm_dsi->bridge = NULL; + goto fail; + } + + msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id); + if (IS_ERR(msm_dsi->connector)) { + ret = PTR_ERR(msm_dsi->connector); + dev_err(dev->dev, "failed to create dsi connector: %d\n",
ret); + msm_dsi->connector = NULL; + goto fail; + } + + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { + encoders[i]->bridge = msm_dsi->bridge; + msm_dsi->encoders[i] = encoders[i]; + } + + priv->bridges[priv->num_bridges++] = msm_dsi->bridge; + priv->connectors[priv->num_connectors++] = msm_dsi->connector; + + return 0; +fail: + if (msm_dsi) { + /* bridge/connector are normally destroyed by drm: */ + if (msm_dsi->bridge) { + msm_dsi_manager_bridge_destroy(msm_dsi->bridge); + msm_dsi->bridge = NULL; + } + if (msm_dsi->connector) { + msm_dsi->connector->funcs->destroy(msm_dsi->connector); + msm_dsi->connector = NULL; + } + } + + return ret; +} + diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h new file mode 100644 index 0000000..10f54d4 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DSI_CONNECTOR_H__ +#define __DSI_CONNECTOR_H__ + +#include <linux/platform_device.h> + +#include "drm_crtc.h" +#include "drm_mipi_dsi.h" +#include "drm_panel.h" + +#include "msm_drv.h" + +#define DSI_0 0 +#define DSI_1 1 +#define DSI_MAX 2 + +#define DSI_CLOCK_MASTER DSI_0 +#define DSI_CLOCK_SLAVE DSI_1 + +#define DSI_LEFT DSI_0 +#define DSI_RIGHT DSI_1 + +/* According to the current drm framework sequence, take the encoder of + * DSI_1 as master encoder + */ +#define DSI_ENCODER_MASTER DSI_1 +#define DSI_ENCODER_SLAVE DSI_0 + +struct msm_dsi { + struct drm_device *dev; + struct platform_device *pdev; + + struct drm_connector *connector; + struct drm_bridge *bridge; + + struct mipi_dsi_host *host; + struct msm_dsi_phy *phy; + struct drm_panel *panel; + unsigned long panel_flags; + bool phy_enabled; + + /* the encoders we are hooked to (outside of dsi block) */ + struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]; + + int id; +}; + +/* dsi manager */ +struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); +void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); +struct drm_connector *msm_dsi_manager_connector_init(u8 id); +int msm_dsi_manager_phy_enable(int id, + const unsigned long bit_rate, const unsigned long esc_rate, + u32 *clk_pre, u32 *clk_post); +void msm_dsi_manager_phy_disable(int id); +int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); +bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len); +int msm_dsi_manager_register(struct msm_dsi *msm_dsi); +void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); + +/* msm dsi */ +struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi); + +/* dsi host */ +int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, + u32 iova, u32 len); +int msm_dsi_host_enable(struct 
mipi_dsi_host *host); +int msm_dsi_host_disable(struct mipi_dsi_host *host); +int msm_dsi_host_power_on(struct mipi_dsi_host *host); +int msm_dsi_host_power_off(struct mipi_dsi_host *host); +int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, + struct drm_display_mode *mode); +struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, + unsigned long *panel_flags); +int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); +void msm_dsi_host_unregister(struct mipi_dsi_host *host); +void msm_dsi_host_destroy(struct mipi_dsi_host *host); +int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, + struct drm_device *dev); +int msm_dsi_host_init(struct msm_dsi *msm_dsi); + +/* dsi phy */ +struct msm_dsi_phy; +enum msm_dsi_phy_type { + MSM_DSI_PHY_UNKNOWN, + MSM_DSI_PHY_28NM, + MSM_DSI_PHY_MAX +}; +struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id); +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, + const unsigned long bit_rate, const unsigned long esc_rate); +int msm_dsi_phy_disable(struct msm_dsi_phy *phy); +void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, + u32 *clk_pre, u32 *clk_post); +#endif /* __DSI_CONNECTOR_H__ */ + diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index abf1bba..1dcfae2 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) - -Copyright (C) 2013 by the following authors: +- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31) +- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22) + +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -51,11 +42,11 @@ enum dsi_traffic_mode { BURST_MODE = 2, }; -enum dsi_dst_format { - DST_FORMAT_RGB565 = 0, - DST_FORMAT_RGB666 = 1, - DST_FORMAT_RGB666_LOOSE = 2, - DST_FORMAT_RGB888 = 3, +enum dsi_vid_dst_format { + VID_DST_FORMAT_RGB565 = 0, + VID_DST_FORMAT_RGB666 = 1, + VID_DST_FORMAT_RGB666_LOOSE = 2, + VID_DST_FORMAT_RGB888 = 3, }; enum dsi_rgb_swap { @@ -69,20 
+60,63 @@ enum dsi_rgb_swap { enum dsi_cmd_trigger { TRIGGER_NONE = 0, + TRIGGER_SEOF = 1, TRIGGER_TE = 2, TRIGGER_SW = 4, TRIGGER_SW_SEOF = 5, TRIGGER_SW_TE = 6, }; +enum dsi_cmd_dst_format { + CMD_DST_FORMAT_RGB111 = 0, + CMD_DST_FORMAT_RGB332 = 3, + CMD_DST_FORMAT_RGB444 = 4, + CMD_DST_FORMAT_RGB565 = 6, + CMD_DST_FORMAT_RGB666 = 7, + CMD_DST_FORMAT_RGB888 = 8, +}; + +enum dsi_lane_swap { + LANE_SWAP_0123 = 0, + LANE_SWAP_3012 = 1, + LANE_SWAP_2301 = 2, + LANE_SWAP_1230 = 3, + LANE_SWAP_0321 = 4, + LANE_SWAP_1032 = 5, + LANE_SWAP_2103 = 6, + LANE_SWAP_3210 = 7, +}; + #define DSI_IRQ_CMD_DMA_DONE 0x00000001 #define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 #define DSI_IRQ_CMD_MDP_DONE 0x00000100 #define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200 #define DSI_IRQ_VIDEO_DONE 0x00010000 #define DSI_IRQ_MASK_VIDEO_DONE 0x00020000 +#define DSI_IRQ_BTA_DONE 0x00100000 +#define DSI_IRQ_MASK_BTA_DONE 0x00200000 #define DSI_IRQ_ERROR 0x01000000 #define DSI_IRQ_MASK_ERROR 0x02000000 +#define REG_DSI_6G_HW_VERSION 0x00000000 +#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000 +#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK; +} +#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000 +#define DSI_6G_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK; +} +#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff +#define DSI_6G_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK; +} + #define REG_DSI_CTRL 0x00000000 #define DSI_CTRL_ENABLE 0x00000001 #define DSI_CTRL_VID_MODE_EN 0x00000002 @@ -96,11 +130,15 @@ enum dsi_cmd_trigger { #define DSI_CTRL_CRC_CHECK 0x01000000 #define REG_DSI_STATUS0 0x00000004 +#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001 #define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002 +#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004 #define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008 #define DSI_STATUS0_DSI_BUSY 0x00000010 +#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000 #define REG_DSI_FIFO_STATUS 0x00000008 +#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080 #define REG_DSI_VID_CFG0 0x0000000c #define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003 @@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val) } #define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030 #define DSI_VID_CFG0_DST_FORMAT__SHIFT 4 -static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val) +static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val) { return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK; } @@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val) #define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000 #define REG_DSI_VID_CFG1 0x0000001c -#define DSI_VID_CFG1_R_SEL 0x00000010 -#define DSI_VID_CFG1_G_SEL 0x00000100 -#define DSI_VID_CFG1_B_SEL 0x00001000 -#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000 -#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16 +#define DSI_VID_CFG1_R_SEL 0x00000001 +#define DSI_VID_CFG1_G_SEL 0x00000010 +#define DSI_VID_CFG1_B_SEL 0x00000100 +#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000 +#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12 static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val) { return ((val) 
<< DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK; } -#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000 -#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20 -static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val) -{ - return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK; -} #define REG_DSI_ACTIVE_H 0x00000020 #define DSI_ACTIVE_H_START__MASK 0x00000fff @@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val) return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK; } -#define REG_DSI_ACTIVE_VSYNC 0x00000034 -#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff -#define DSI_ACTIVE_VSYNC_START__SHIFT 0 -static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val) +#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030 +#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff +#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val) { - return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK; + return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK; } -#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000 -#define DSI_ACTIVE_VSYNC_END__SHIFT 16 -static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val) +#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000 +#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val) { - return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK; + return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK; +} + +#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034 +#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff +#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK; +} +#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000 +#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK; } #define REG_DSI_CMD_DMA_CTRL 0x00000038 +#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000 #define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000 #define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000 #define REG_DSI_CMD_CFG0 0x0000003c +#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f +#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0 +static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val) +{ + return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK; +} +#define DSI_CMD_CFG0_R_SEL 0x00000010 +#define DSI_CMD_CFG0_G_SEL 0x00000100 +#define DSI_CMD_CFG0_B_SEL 0x00001000 +#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000 +#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20 +static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val) +{ + return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK; +} +#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000 +#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16 +static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK; +} #define REG_DSI_CMD_CFG1 0x00000040 +#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff +#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0 +static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val) +{ + return ((val) << 
DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK; +} +#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00 +#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8 +static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val) +{ + return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK; +} +#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000 #define REG_DSI_DMA_BASE 0x00000044 #define REG_DSI_DMA_LEN 0x00000048 +#define REG_DSI_CMD_MDP_STREAM_CTRL 0x00000054 +#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK 0x0000003f +#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK; +} +#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 +#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT 8 +static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK; +} +#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK 0xffff0000 +#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK; +} + +#define REG_DSI_CMD_MDP_STREAM_TOTAL 0x00000058 +#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK 0x00000fff +#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK; +} +#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK 0x0fff0000 +#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK; +} + #define REG_DSI_ACK_ERR_STATUS 0x00000064 static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; } @@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; } static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; } #define REG_DSI_TRIG_CTRL 0x00000080 -#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f +#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007 #define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0 static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val) { return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK; } -#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0 +#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070 #define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4 static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val) { return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK; } -#define DSI_TRIG_CTRL_STREAM 0x00000100 +#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300 +#define DSI_TRIG_CTRL_STREAM__SHIFT 8 +static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val) +{ + return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK; +} +#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000 #define DSI_TRIG_CTRL_TE 0x80000000 #define REG_DSI_TRIG_DMA 0x0000008c @@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) #define 
DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010 #define REG_DSI_LANE_SWAP_CTRL 0x000000ac +#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007 +#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0 +static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) +{ + return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK; +} #define REG_DSI_ERR_INT_MASK0 0x00000108 @@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) #define REG_DSI_RESET 0x00000114 #define REG_DSI_CLK_CTRL 0x00000118 +#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001 +#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002 +#define DSI_CLK_CTRL_PCLK_ON 0x00000004 +#define DSI_CLK_CTRL_DSICLK_ON 0x00000008 +#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010 +#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020 +#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200 + +#define REG_DSI_CLK_STATUS 0x0000011c +#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000 #define REG_DSI_PHY_RESET 0x00000128 +#define DSI_PHY_RESET_RESET 0x00000001 + +#define REG_DSI_RDBK_DATA_CTRL 0x000001d0 +#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 +#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 +static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val) +{ + return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK; +} +#define DSI_RDBK_DATA_CTRL_CLR 0x00000001 + +#define REG_DSI_VERSION 0x000001f0 +#define DSI_VERSION_MAJOR__MASK 0xff000000 +#define DSI_VERSION_MAJOR__SHIFT 24 +static inline uint32_t DSI_VERSION_MAJOR(uint32_t val) +{ + return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK; +} #define REG_DSI_PHY_PLL_CTRL_0 0x00000200 #define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001 @@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003 #define REG_DSI_8960_PHY_CAL_STATUS 0x00000550 #define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010 +static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; } + +static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; } + +#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100 + +#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104 + +#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108 + +#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c + +#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110 + +#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114 + +#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118 + +#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c + +#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120 + +#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140 +#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff +#define 
DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144 +#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148 +#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c +#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001 + +#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150 +#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154 +#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158 +#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c +#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160 +#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164 +#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK; +} +#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168 
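[Editor's note, not part of the patch: each field in this generated header comes as a __MASK/__SHIFT pair plus an inline packer, and the driver extracts fields with a FIELD() macro. A small standalone sketch of the pattern, reusing the TA_GO/TA_SURE fields defined just above; the FIELD() definition mirrors the msm driver's helper, and the values 3 and 1 are made up for illustration:]

#include <stdint.h>
#include <stdio.h>
#include "dsi.xml.h"

/* mirrors the FIELD() helper the msm driver uses with these headers */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

int main(void)
{
	/* pack two fields of TIMING_CTRL_9 */
	uint32_t v = DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(3) |
		     DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(1);

	/* in the driver this would be dsi_write(host, REG_DSI_28nm_PHY_TIMING_CTRL_9, v) */
	printf("reg=0x%08x ta_go=%u ta_sure=%u\n", v,
	       FIELD(v, DSI_28nm_PHY_TIMING_CTRL_9_TA_GO),
	       FIELD(v, DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE));
	return 0;	/* prints reg=0x00000013 ta_go=3 ta_sure=1 */
}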
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK; +} + +#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c +#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK; +} + +#define REG_DSI_28nm_PHY_CTRL_0 0x00000170 + +#define REG_DSI_28nm_PHY_CTRL_1 0x00000174 + +#define REG_DSI_28nm_PHY_CTRL_2 0x00000178 + +#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c + +#define REG_DSI_28nm_PHY_CTRL_4 0x00000180 + +#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184 + +#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188 + +#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4 + +#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8 + +#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc + +#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0 + +#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4 + +#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8 + +#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4 + +#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000 + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004 + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008 + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010 + +#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014 + +#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 + #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c new file mode 100644 index 0000000..fdc54e3 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -0,0 +1,1993 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h> +#include <linux/regulator/consumer.h> +#include <linux/spinlock.h> +#include <video/mipi_display.h> + +#include "dsi.h" +#include "dsi.xml.h" + +#define MSM_DSI_VER_MAJOR_V2 0x02 +#define MSM_DSI_VER_MAJOR_6G 0x03 +#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 +#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 +#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 +#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 +#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 + +#define DSI_6G_REG_SHIFT 4 + +#define DSI_REGULATOR_MAX 8 +struct dsi_reg_entry { + char name[32]; + int min_voltage; + int max_voltage; + int enable_load; + int disable_load; +}; + +struct dsi_reg_config { + int num; + struct dsi_reg_entry regs[DSI_REGULATOR_MAX]; +}; + +struct dsi_config { + u32 major; + u32 minor; + u32 io_offset; + enum msm_dsi_phy_type phy_type; + struct dsi_reg_config reg_cfg; +}; + +static const struct dsi_config dsi_cfgs[] = { + {MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN}, + { /* 8974 v1 */ + .major = MSM_DSI_VER_MAJOR_6G, + .minor = MSM_DSI_6G_VER_MINOR_V1_0, + .io_offset = DSI_6G_REG_SHIFT, + .phy_type = MSM_DSI_PHY_28NM, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + }, + { /* 8974 v2 */ + .major = MSM_DSI_VER_MAJOR_6G, + .minor = MSM_DSI_6G_VER_MINOR_V1_1, + .io_offset = DSI_6G_REG_SHIFT, + .phy_type = MSM_DSI_PHY_28NM, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + }, + { /* 8974 v3 */ + .major = MSM_DSI_VER_MAJOR_6G, + .minor = MSM_DSI_6G_VER_MINOR_V1_1_1, + .io_offset = DSI_6G_REG_SHIFT, + .phy_type = MSM_DSI_PHY_28NM, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + }, + { /* 8084 */ + .major = MSM_DSI_VER_MAJOR_6G, + .minor = MSM_DSI_6G_VER_MINOR_V1_2, + .io_offset = DSI_6G_REG_SHIFT, + .phy_type = MSM_DSI_PHY_28NM, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 3000000, 3000000, 150000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + }, + { /* 8916 */ + .major = MSM_DSI_VER_MAJOR_6G, + .minor = MSM_DSI_6G_VER_MINOR_V1_3_1, + .io_offset = DSI_6G_REG_SHIFT, + .phy_type = MSM_DSI_PHY_28NM, + .reg_cfg = { + .num = 4, + .regs = { + {"gdsc", -1, -1, -1, -1}, + {"vdd", 2850000, 2850000, 100000, 100}, + {"vdda", 1200000, 1200000, 100000, 100}, + {"vddio", 1800000, 1800000, 100000, 100}, + }, + }, + }, +}; + +static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) +{ + u32 ver; + u32 ver_6g; + + if (!major || !minor) + return -EINVAL; + + /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0 + * makes all other registers 4-byte shifted down. 
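[Editor's note, not part of the patch: the probe described in this comment keys the config table on the raw 6G_HW_VERSION word, which the DSI_6G_HW_VERSION_{MAJOR,MINOR,STEP} masks from dsi.xml.h decode. A worked example using MSM_DSI_6G_VER_MINOR_V1_3_1 (0x10030001, the 8916 entry below):]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ver_6g = 0x10030001;	/* MSM_DSI_6G_VER_MINOR_V1_3_1 */

	/* masks/shifts are DSI_6G_HW_VERSION_{MAJOR,MINOR,STEP} from dsi.xml.h */
	printf("DSI6G v%u.%u.%u\n",
	       (ver_6g & 0xf0000000) >> 28,
	       (ver_6g & 0x0fff0000) >> 16,
	       ver_6g & 0x0000ffff);	/* prints DSI6G v1.3.1 */
	return 0;
}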
+ */ + ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION); + if (ver_6g == 0) { + ver = msm_readl(base + REG_DSI_VERSION); + ver = FIELD(ver, DSI_VERSION_MAJOR); + if (ver <= MSM_DSI_VER_MAJOR_V2) { + /* old versions */ + *major = ver; + *minor = 0; + return 0; + } else { + return -EINVAL; + } + } else { + ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); + ver = FIELD(ver, DSI_VERSION_MAJOR); + if (ver == MSM_DSI_VER_MAJOR_6G) { + /* 6G version */ + *major = ver; + *minor = ver_6g; + return 0; + } else { + return -EINVAL; + } + } +} + +#define DSI_ERR_STATE_ACK 0x0000 +#define DSI_ERR_STATE_TIMEOUT 0x0001 +#define DSI_ERR_STATE_DLN0_PHY 0x0002 +#define DSI_ERR_STATE_FIFO 0x0004 +#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008 +#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010 +#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020 + +#define DSI_CLK_CTRL_ENABLE_CLKS \ + (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \ + DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \ + DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \ + DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK) + +struct msm_dsi_host { + struct mipi_dsi_host base; + + struct platform_device *pdev; + struct drm_device *dev; + + int id; + + void __iomem *ctrl_base; + struct regulator_bulk_data supplies[DSI_REGULATOR_MAX]; + struct clk *mdp_core_clk; + struct clk *ahb_clk; + struct clk *axi_clk; + struct clk *mmss_misc_ahb_clk; + struct clk *byte_clk; + struct clk *esc_clk; + struct clk *pixel_clk; + u32 byte_clk_rate; + + struct gpio_desc *disp_en_gpio; + struct gpio_desc *te_gpio; + + const struct dsi_config *cfg; + + struct completion dma_comp; + struct completion video_comp; + struct mutex dev_mutex; + struct mutex cmd_mutex; + struct mutex clk_mutex; + spinlock_t intr_lock; /* Protect interrupt ctrl register */ + + u32 err_work_state; + struct work_struct err_work; + struct workqueue_struct *workqueue; + + struct drm_gem_object *tx_gem_obj; + u8 *rx_buf; + + struct drm_display_mode *mode; + + /* Panel info */ + struct device_node *panel_node; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; + + u32 dma_cmd_ctrl_restore; + + bool registered; + bool power_on; + int irq; +}; + +static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB565: return 16; + case MIPI_DSI_FMT_RGB666_PACKED: return 18; + case MIPI_DSI_FMT_RGB666: + case MIPI_DSI_FMT_RGB888: + default: return 24; + } +} + +static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) +{ + return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg); +} +static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) +{ + msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg); +} + +static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); +static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); + +static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host) +{ + const struct dsi_config *cfg; + struct regulator *gdsc_reg; + int i, ret; + u32 major = 0, minor = 0; + + gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc"); + if (IS_ERR_OR_NULL(gdsc_reg)) { + pr_err("%s: cannot get gdsc\n", __func__); + goto fail; + } + ret = regulator_enable(gdsc_reg); + if (ret) { + pr_err("%s: unable to enable gdsc\n", __func__); + regulator_put(gdsc_reg); + goto fail; + } + ret = clk_prepare_enable(msm_host->ahb_clk); + if (ret) { + pr_err("%s: unable to enable ahb_clk\n", __func__); + 
regulator_disable(gdsc_reg); + regulator_put(gdsc_reg); + goto fail; + } + + ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); + + clk_disable_unprepare(msm_host->ahb_clk); + regulator_disable(gdsc_reg); + regulator_put(gdsc_reg); + if (ret) { + pr_err("%s: Invalid version\n", __func__); + goto fail; + } + + for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) { + cfg = dsi_cfgs + i; + if ((cfg->major == major) && (cfg->minor == minor)) + return cfg; + } + pr_err("%s: Version %x:%x not supported\n", __func__, major, minor); + +fail: + return NULL; +} + +static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) +{ + return container_of(host, struct msm_dsi_host, base); +} + +static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) +{ + struct regulator_bulk_data *s = msm_host->supplies; + const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; + int num = msm_host->cfg->reg_cfg.num; + int i; + + DBG(""); + for (i = num - 1; i >= 0; i--) + if (regs[i].disable_load >= 0) + regulator_set_optimum_mode(s[i].consumer, + regs[i].disable_load); + + regulator_bulk_disable(num, s); +} + +static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) +{ + struct regulator_bulk_data *s = msm_host->supplies; + const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; + int num = msm_host->cfg->reg_cfg.num; + int ret, i; + + DBG(""); + for (i = 0; i < num; i++) { + if (regs[i].enable_load >= 0) { + ret = regulator_set_optimum_mode(s[i].consumer, + regs[i].enable_load); + if (ret < 0) { + pr_err("regulator %d set op mode failed, %d\n", + i, ret); + goto fail; + } + } + } + + ret = regulator_bulk_enable(num, s); + if (ret < 0) { + pr_err("regulator enable failed, %d\n", ret); + goto fail; + } + + return 0; + +fail: + for (i--; i >= 0; i--) + regulator_set_optimum_mode(s[i].consumer, regs[i].disable_load); + return ret; +} + +static int dsi_regulator_init(struct msm_dsi_host *msm_host) +{ + struct regulator_bulk_data *s = msm_host->supplies; + const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; + int num = msm_host->cfg->reg_cfg.num; + int i, ret; + + for (i = 0; i < num; i++) + s[i].supply = regs[i].name; + + ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s); + if (ret < 0) { + pr_err("%s: failed to init regulator, ret=%d\n", + __func__, ret); + return ret; + } + + for (i = 0; i < num; i++) { + if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { + ret = regulator_set_voltage(s[i].consumer, + regs[i].min_voltage, regs[i].max_voltage); + if (ret < 0) { + pr_err("regulator %d set voltage failed, %d\n", + i, ret); + return ret; + } + } + } + + return 0; +} + +static int dsi_clk_init(struct msm_dsi_host *msm_host) +{ + struct device *dev = &msm_host->pdev->dev; + int ret = 0; + + msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); + if (IS_ERR(msm_host->mdp_core_clk)) { + ret = PTR_ERR(msm_host->mdp_core_clk); + pr_err("%s: Unable to get mdp core clk. ret=%d\n", + __func__, ret); + goto exit; + } + + msm_host->ahb_clk = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(msm_host->ahb_clk)) { + ret = PTR_ERR(msm_host->ahb_clk); + pr_err("%s: Unable to get mdss ahb clk. ret=%d\n", + __func__, ret); + goto exit; + } + + msm_host->axi_clk = devm_clk_get(dev, "bus_clk"); + if (IS_ERR(msm_host->axi_clk)) { + ret = PTR_ERR(msm_host->axi_clk); + pr_err("%s: Unable to get axi bus clk.
ret=%d\n", + __func__, ret); + goto exit; + } + + msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk"); + if (IS_ERR(msm_host->mmss_misc_ahb_clk)) { + ret = PTR_ERR(msm_host->mmss_misc_ahb_clk); + pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n", + __func__, ret); + goto exit; + } + + msm_host->byte_clk = devm_clk_get(dev, "byte_clk"); + if (IS_ERR(msm_host->byte_clk)) { + ret = PTR_ERR(msm_host->byte_clk); + pr_err("%s: can't find dsi_byte_clk. ret=%d\n", + __func__, ret); + msm_host->byte_clk = NULL; + goto exit; + } + + msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk"); + if (IS_ERR(msm_host->pixel_clk)) { + ret = PTR_ERR(msm_host->pixel_clk); + pr_err("%s: can't find dsi_pixel_clk. ret=%d\n", + __func__, ret); + msm_host->pixel_clk = NULL; + goto exit; + } + + msm_host->esc_clk = devm_clk_get(dev, "core_clk"); + if (IS_ERR(msm_host->esc_clk)) { + ret = PTR_ERR(msm_host->esc_clk); + pr_err("%s: can't find dsi_esc_clk. ret=%d\n", + __func__, ret); + msm_host->esc_clk = NULL; + goto exit; + } + +exit: + return ret; +} + +static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) +{ + int ret; + + DBG("id=%d", msm_host->id); + + ret = clk_prepare_enable(msm_host->mdp_core_clk); + if (ret) { + pr_err("%s: failed to enable mdp_core_clock, %d\n", + __func__, ret); + goto core_clk_err; + } + + ret = clk_prepare_enable(msm_host->ahb_clk); + if (ret) { + pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret); + goto ahb_clk_err; + } + + ret = clk_prepare_enable(msm_host->axi_clk); + if (ret) { + pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret); + goto axi_clk_err; + } + + ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk); + if (ret) { + pr_err("%s: failed to enable mmss misc ahb clk, %d\n", + __func__, ret); + goto misc_ahb_clk_err; + } + + return 0; + +misc_ahb_clk_err: + clk_disable_unprepare(msm_host->axi_clk); +axi_clk_err: + clk_disable_unprepare(msm_host->ahb_clk); +ahb_clk_err: + clk_disable_unprepare(msm_host->mdp_core_clk); +core_clk_err: + return ret; +} + +static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) +{ + DBG(""); + clk_disable_unprepare(msm_host->mmss_misc_ahb_clk); + clk_disable_unprepare(msm_host->axi_clk); + clk_disable_unprepare(msm_host->ahb_clk); + clk_disable_unprepare(msm_host->mdp_core_clk); +} + +static int dsi_link_clk_enable(struct msm_dsi_host *msm_host) +{ + int ret; + + DBG("Set clk rates: pclk=%d, byteclk=%d", + msm_host->mode->clock, msm_host->byte_clk_rate); + + ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); + goto error; + } + + ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000); + if (ret) { + pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); + goto error; + } + + ret = clk_prepare_enable(msm_host->esc_clk); + if (ret) { + pr_err("%s: Failed to enable dsi esc clk\n", __func__); + goto error; + } + + ret = clk_prepare_enable(msm_host->byte_clk); + if (ret) { + pr_err("%s: Failed to enable dsi byte clk\n", __func__); + goto byte_clk_err; + } + + ret = clk_prepare_enable(msm_host->pixel_clk); + if (ret) { + pr_err("%s: Failed to enable dsi pixel clk\n", __func__); + goto pixel_clk_err; + } + + return 0; + +pixel_clk_err: + clk_disable_unprepare(msm_host->byte_clk); +byte_clk_err: + clk_disable_unprepare(msm_host->esc_clk); +error: + return ret; +} + +static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) +{ + clk_disable_unprepare(msm_host->esc_clk); + 
clk_disable_unprepare(msm_host->pixel_clk); + clk_disable_unprepare(msm_host->byte_clk); +} + +static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable) +{ + int ret = 0; + + mutex_lock(&msm_host->clk_mutex); + if (enable) { + ret = dsi_bus_clk_enable(msm_host); + if (ret) { + pr_err("%s: Can not enable bus clk, %d\n", + __func__, ret); + goto unlock_ret; + } + ret = dsi_link_clk_enable(msm_host); + if (ret) { + pr_err("%s: Can not enable link clk, %d\n", + __func__, ret); + dsi_bus_clk_disable(msm_host); + goto unlock_ret; + } + } else { + dsi_link_clk_disable(msm_host); + dsi_bus_clk_disable(msm_host); + } + +unlock_ret: + mutex_unlock(&msm_host->clk_mutex); + return ret; +} + +static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) +{ + struct drm_display_mode *mode = msm_host->mode; + u8 lanes = msm_host->lanes; + u32 bpp = dsi_get_bpp(msm_host->format); + u32 pclk_rate; + + if (!mode) { + pr_err("%s: mode not set\n", __func__); + return -EINVAL; + } + + pclk_rate = mode->clock * 1000; + if (lanes > 0) { + msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes); + } else { + pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__); + msm_host->byte_clk_rate = (pclk_rate * bpp) / 8; + } + + DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate); + + return 0; +} + +static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host) +{ + DBG(""); + dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); + /* Make sure fully reset */ + wmb(); + udelay(1000); + dsi_write(msm_host, REG_DSI_PHY_RESET, 0); + udelay(100); +} + +static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) +{ + u32 intr; + unsigned long flags; + + spin_lock_irqsave(&msm_host->intr_lock, flags); + intr = dsi_read(msm_host, REG_DSI_INTR_CTRL); + + if (enable) + intr |= mask; + else + intr &= ~mask; + + DBG("intr=%x enable=%d", intr, enable); + + dsi_write(msm_host, REG_DSI_INTR_CTRL, intr); + spin_unlock_irqrestore(&msm_host->intr_lock, flags); +} + +static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags) +{ + if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + return BURST_MODE; + else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + return NON_BURST_SYNCH_PULSE; + + return NON_BURST_SYNCH_EVENT; +} + +static inline enum dsi_vid_dst_format dsi_get_vid_fmt( + const enum mipi_dsi_pixel_format mipi_fmt) +{ + switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; + case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE; + case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565; + default: return VID_DST_FORMAT_RGB888; + } +} + +static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( + const enum mipi_dsi_pixel_format mipi_fmt) +{ + switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; + case MIPI_DSI_FMT_RGB666_PACKED: + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; + default: return CMD_DST_FORMAT_RGB888; + } +} + +static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, + u32 clk_pre, u32 clk_post) +{ + u32 flags = msm_host->mode_flags; + enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; + u32 data = 0; + + if (!enable) { + dsi_write(msm_host, REG_DSI_CTRL, 0); + return; + } + + if (flags & MIPI_DSI_MODE_VIDEO) { + if (flags & MIPI_DSI_MODE_VIDEO_HSE) + data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; + if (flags & MIPI_DSI_MODE_VIDEO_HFP) + data |=
DSI_VID_CFG0_HFP_POWER_STOP; + if (flags & MIPI_DSI_MODE_VIDEO_HBP) + data |= DSI_VID_CFG0_HBP_POWER_STOP; + if (flags & MIPI_DSI_MODE_VIDEO_HSA) + data |= DSI_VID_CFG0_HSA_POWER_STOP; + /* Always set low power stop mode for BLLP + * to let command engine send packets + */ + data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP | + DSI_VID_CFG0_BLLP_POWER_STOP; + data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); + data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); + data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); + dsi_write(msm_host, REG_DSI_VID_CFG0, data); + + /* Do not swap RGB colors */ + data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB); + dsi_write(msm_host, REG_DSI_VID_CFG1, 0); + } else { + /* Do not swap RGB colors */ + data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB); + data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt)); + dsi_write(msm_host, REG_DSI_CMD_CFG0, data); + + data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) | + DSI_CMD_CFG1_WR_MEM_CONTINUE( + MIPI_DCS_WRITE_MEMORY_CONTINUE); + /* Always insert DCS command */ + data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND; + dsi_write(msm_host, REG_DSI_CMD_CFG1, data); + } + + dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, + DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER | + DSI_CMD_DMA_CTRL_LOW_POWER); + + data = 0; + /* Always assume dedicated TE pin */ + data |= DSI_TRIG_CTRL_TE; + data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); + data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); + data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); + if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && + (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) + data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; + dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); + + data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | + DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); + dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); + + data = 0; + if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) + data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; + dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); + + /* allow only ack-err-status to generate interrupt */ + dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0); + + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); + + dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); + + data = DSI_CTRL_CLK_EN; + + DBG("lane number=%d", msm_host->lanes); + if (msm_host->lanes == 2) { + data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2; + /* swap lanes for 2-lane panel for better performance */ + dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, + DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230)); + } else { + /* Take 4 lanes as default */ + data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 | + DSI_CTRL_LANE3; + /* Do not swap lanes for 4-lane panel */ + dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, + DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123)); + } + data |= DSI_CTRL_ENABLE; + + dsi_write(msm_host, REG_DSI_CTRL, data); +} + +static void dsi_timing_setup(struct msm_dsi_host *msm_host) +{ + struct drm_display_mode *mode = msm_host->mode; + u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ + u32 h_total = mode->htotal; + u32 v_total = mode->vtotal; + u32 hs_end = mode->hsync_end - mode->hsync_start; + u32 vs_end = mode->vsync_end - mode->vsync_start; + u32 ha_start = h_total - mode->hsync_start; + u32 ha_end = ha_start + mode->hdisplay; + u32 va_start = v_total - mode->vsync_start; + u32 va_end = va_start + mode->vdisplay; + u32 wc; + + DBG(""); + + if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { + dsi_write(msm_host, REG_DSI_ACTIVE_H, + 
DSI_ACTIVE_H_START(ha_start) | + DSI_ACTIVE_H_END(ha_end)); + dsi_write(msm_host, REG_DSI_ACTIVE_V, + DSI_ACTIVE_V_START(va_start) | + DSI_ACTIVE_V_END(va_end)); + dsi_write(msm_host, REG_DSI_TOTAL, + DSI_TOTAL_H_TOTAL(h_total - 1) | + DSI_TOTAL_V_TOTAL(v_total - 1)); + + dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC, + DSI_ACTIVE_HSYNC_START(hs_start) | + DSI_ACTIVE_HSYNC_END(hs_end)); + dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0); + dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS, + DSI_ACTIVE_VSYNC_VPOS_START(vs_start) | + DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); + } else { /* command mode */ + /* image data and 1 byte write_memory_start cmd */ + wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1; + + dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL, + DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) | + DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL( + msm_host->channel) | + DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE( + MIPI_DSI_DCS_LONG_WRITE)); + + dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL, + DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) | + DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay)); + } +} + +static void dsi_sw_reset(struct msm_dsi_host *msm_host) +{ + dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); + wmb(); /* clocks need to be enabled before reset */ + + dsi_write(msm_host, REG_DSI_RESET, 1); + wmb(); /* make sure reset happen */ + dsi_write(msm_host, REG_DSI_RESET, 0); +} + +static void dsi_op_mode_config(struct msm_dsi_host *msm_host, + bool video_mode, bool enable) +{ + u32 dsi_ctrl; + + dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL); + + if (!enable) { + dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN | + DSI_CTRL_CMD_MODE_EN); + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE | + DSI_IRQ_MASK_VIDEO_DONE, 0); + } else { + if (video_mode) { + dsi_ctrl |= DSI_CTRL_VID_MODE_EN; + } else { /* command mode */ + dsi_ctrl |= DSI_CTRL_CMD_MODE_EN; + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1); + } + dsi_ctrl |= DSI_CTRL_ENABLE; + } + + dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl); +} + +static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host) +{ + u32 data; + + data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL); + + if (mode == 0) + data &= ~DSI_CMD_DMA_CTRL_LOW_POWER; + else + data |= DSI_CMD_DMA_CTRL_LOW_POWER; + + dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data); +} + +static void dsi_wait4video_done(struct msm_dsi_host *msm_host) +{ + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); + + reinit_completion(&msm_host->video_comp); + + wait_for_completion_timeout(&msm_host->video_comp, + msecs_to_jiffies(70)); + + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); +} + +static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) +{ + if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) + return; + + if (msm_host->power_on) { + dsi_wait4video_done(msm_host); + /* delay 4 ms to skip BLLP */ + usleep_range(2000, 4000); + } +} + +/* dsi_cmd */ +static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) +{ + struct drm_device *dev = msm_host->dev; + int ret; + u32 iova; + + mutex_lock(&dev->struct_mutex); + msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); + if (IS_ERR(msm_host->tx_gem_obj)) { + ret = PTR_ERR(msm_host->tx_gem_obj); + pr_err("%s: failed to allocate gem, %d\n", __func__, ret); + msm_host->tx_gem_obj = NULL; + mutex_unlock(&dev->struct_mutex); + return ret; + } + + ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); + if (ret) { + pr_err("%s: failed to get iova, %d\n", __func__, ret); + mutex_unlock(&dev->struct_mutex); + return
ret; + } + mutex_unlock(&dev->struct_mutex); + + if (iova & 0x07) { + pr_err("%s: buf NOT 8 bytes aligned\n", __func__); + return -EINVAL; + } + + return 0; +} + +static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) +{ + struct drm_device *dev = msm_host->dev; + + if (msm_host->tx_gem_obj) { + msm_gem_put_iova(msm_host->tx_gem_obj, 0); + mutex_lock(&dev->struct_mutex); + msm_gem_free_object(msm_host->tx_gem_obj); + msm_host->tx_gem_obj = NULL; + mutex_unlock(&dev->struct_mutex); + } +} + +/* + * prepare cmd buffer to be txed + */ +static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem, + const struct mipi_dsi_msg *msg) +{ + struct mipi_dsi_packet packet; + int len; + int ret; + u8 *data; + + ret = mipi_dsi_create_packet(&packet, msg); + if (ret) { + pr_err("%s: create packet failed, %d\n", __func__, ret); + return ret; + } + len = (packet.size + 3) & (~0x3); + + if (len > tx_gem->size) { + pr_err("%s: packet size is too big\n", __func__); + return -EINVAL; + } + + data = msm_gem_vaddr(tx_gem); + + if (IS_ERR(data)) { + ret = PTR_ERR(data); + pr_err("%s: get vaddr failed, %d\n", __func__, ret); + return ret; + } + + /* MSM specific command format in memory */ + data[0] = packet.header[1]; + data[1] = packet.header[2]; + data[2] = packet.header[0]; + data[3] = BIT(7); /* Last packet */ + if (mipi_dsi_packet_format_is_long(msg->type)) + data[3] |= BIT(6); + if (msg->rx_buf && msg->rx_len) + data[3] |= BIT(5); + + /* Long packet */ + if (packet.payload && packet.payload_length) + memcpy(data + 4, packet.payload, packet.payload_length); + + /* Append 0xff to the end */ + if (packet.size < len) + memset(data + packet.size, 0xff, len - packet.size); + + return len; +} + +/* + * dsi_short_read1_resp: 1 parameter + */ +static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + u8 *data = msg->rx_buf; + if (data && (msg->rx_len >= 1)) { + *data = buf[1]; /* strip out dcs type */ + return 1; + } else { + pr_err("%s: read data does not match with rx_buf len %d\n", + __func__, msg->rx_len); + return -EINVAL; + } +} + +/* + * dsi_short_read2_resp: 2 parameter + */ +static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + u8 *data = msg->rx_buf; + if (data && (msg->rx_len >= 2)) { + data[0] = buf[1]; /* strip out dcs type */ + data[1] = buf[2]; + return 2; + } else { + pr_err("%s: read data does not match with rx_buf len %d\n", + __func__, msg->rx_len); + return -EINVAL; + } +} + +static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + /* strip out 4 byte dcs header */ + if (msg->rx_buf && msg->rx_len) + memcpy(msg->rx_buf, buf + 4, msg->rx_len); + + return msg->rx_len; +} + + +static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) +{ + int ret; + u32 iova; + bool triggered; + + ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova); + if (ret) { + pr_err("%s: failed to get iova: %d\n", __func__, ret); + return ret; + } + + reinit_completion(&msm_host->dma_comp); + + dsi_wait4video_eng_busy(msm_host); + + triggered = msm_dsi_manager_cmd_xfer_trigger( + msm_host->id, iova, len); + if (triggered) { + ret = wait_for_completion_timeout(&msm_host->dma_comp, + msecs_to_jiffies(200)); + DBG("ret=%d", ret); + if (ret == 0) + ret = -ETIMEDOUT; + else + ret = len; + } else + ret = len; + + return ret; +} + +static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, + u8 *buf, int rx_byte, int pkt_size) +{ + u32 *lp, *temp, data; + int i, j = 0, cnt; + bool ack_error = false; + u32 read_cnt; + u8 reg[16]; + int repeated_bytes = 0; + int 
buf_offset = buf - msm_host->rx_buf;
+
+	lp = (u32 *)buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32-bit registers only */
+
+	/* Calculate real read data count */
+	read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
+
+	ack_error = (rx_byte == 4) ?
+		(read_cnt == 8) : /* short pkt + 4-byte error pkt */
+		(read_cnt == (pkt_size + 6 + 4)); /* long pkt + 4-byte error pkt */
+
+	if (ack_error)
+		read_cnt -= 4; /* Remove 4-byte error pkt */
+
+	/*
+	 * In case of multiple reads from the panel, after the first read some
+	 * bytes of the payload may repeat in the RDBK_DATA registers, because
+	 * every pass reads all the parameters from the panel again, starting
+	 * from the first byte. We need to skip the repeated bytes and append
+	 * only the new parameters to the rx buffer.
+	 */
+	if (read_cnt > 16) {
+		int bytes_shifted;
+		/* Any data more than 16 bytes will be shifted out.
+		 * The temp read buffer should already contain these bytes.
+		 * The remaining bytes in the read buffer are the repeated bytes.
+		 */
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+		*temp++ = ntohl(data); /* to host byte order */
+		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		buf[j++] = reg[i];
+
+	return j;
+}
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+			const struct mipi_dsi_msg *msg)
+{
+	int len, ret;
+	int bllp_len = msm_host->mode->hdisplay *
+			dsi_get_bpp(msm_host->format) / 8;
+
+	len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+	if (len < 0) {
+		pr_err("%s: failed to add cmd type = 0x%x, %d\n",
+			__func__, msg->type, len);
+		return len;
+	}
+
+	/* For video mode, do not send cmds longer than
+	 * one pixel line, since they are only transmitted
+	 * during the BLLP.
+	 */
+	/* TODO: if the command is sent in LP mode, the bit rate is only
+	 * half of the esc clk rate. In that case, if video is already
+	 * actively streaming, we need to check more carefully whether the
+	 * command can fit into one BLLP.
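+	 * For example (illustrative numbers, not from this patch): in LP
+	 * mode data moves at about esc_clk_rate / 16 bytes per second
+	 * (half the esc clk bit rate, 8 bits per byte), so a 19.2MHz esc
+	 * clk gives roughly 1.2MB/s, and a command of len bytes would
+	 * occupy about len / 1200000 seconds of BLLP time per frame.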
+ */ + if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) { + pr_err("%s: cmd cannot fit into BLLP period, len=%d\n", + __func__, len); + return -EINVAL; + } + + ret = dsi_cmd_dma_tx(msm_host, len); + if (ret < len) { + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n", + __func__, msg->type, (*(u8 *)(msg->tx_buf)), len); + return -ECOMM; + } + + return len; +} + +static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) +{ + u32 data0, data1; + + data0 = dsi_read(msm_host, REG_DSI_CTRL); + data1 = data0; + data1 &= ~DSI_CTRL_ENABLE; + dsi_write(msm_host, REG_DSI_CTRL, data1); + /* + * dsi controller need to be disabled before + * clocks turned on + */ + wmb(); + + dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); + wmb(); /* make sure clocks enabled */ + + /* dsi controller can only be reset while clocks are running */ + dsi_write(msm_host, REG_DSI_RESET, 1); + wmb(); /* make sure reset happen */ + dsi_write(msm_host, REG_DSI_RESET, 0); + wmb(); /* controller out of reset */ + dsi_write(msm_host, REG_DSI_CTRL, data0); + wmb(); /* make sure dsi controller enabled again */ +} + +static void dsi_err_worker(struct work_struct *work) +{ + struct msm_dsi_host *msm_host = + container_of(work, struct msm_dsi_host, err_work); + u32 status = msm_host->err_work_state; + + pr_err("%s: status=%x\n", __func__, status); + if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) + dsi_sw_reset_restore(msm_host); + + /* It is safe to clear here because error irq is disabled. */ + msm_host->err_work_state = 0; + + /* enable dsi error interrupt */ + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); +} + +static void dsi_ack_err_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS); + + if (status) { + dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status); + /* Writing of an extra 0 needed to clear error bits */ + dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0); + msm_host->err_work_state |= DSI_ERR_STATE_ACK; + } +} + +static void dsi_timeout_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS); + + if (status) { + dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status); + msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT; + } +} + +static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); + + if (status) { + dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); + msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; + } +} + +static void dsi_fifo_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_FIFO_STATUS); + + /* fifo underflow, overflow */ + if (status) { + dsi_write(msm_host, REG_DSI_FIFO_STATUS, status); + msm_host->err_work_state |= DSI_ERR_STATE_FIFO; + if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW) + msm_host->err_work_state |= + DSI_ERR_STATE_MDP_FIFO_UNDERFLOW; + } +} + +static void dsi_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_STATUS0); + + if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) { + dsi_write(msm_host, REG_DSI_STATUS0, status); + msm_host->err_work_state |= + DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION; + } +} + +static void dsi_clk_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_CLK_STATUS); + + if (status & DSI_CLK_STATUS_PLL_UNLOCKED) { + dsi_write(msm_host, REG_DSI_CLK_STATUS, status); + msm_host->err_work_state |= 
DSI_ERR_STATE_PLL_UNLOCKED; + } +} + +static void dsi_error(struct msm_dsi_host *msm_host) +{ + /* disable dsi error interrupt */ + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0); + + dsi_clk_status(msm_host); + dsi_fifo_status(msm_host); + dsi_ack_err_status(msm_host); + dsi_timeout_status(msm_host); + dsi_status(msm_host); + dsi_dln0_phy_err(msm_host); + + queue_work(msm_host->workqueue, &msm_host->err_work); +} + +static irqreturn_t dsi_host_irq(int irq, void *ptr) +{ + struct msm_dsi_host *msm_host = ptr; + u32 isr; + unsigned long flags; + + if (!msm_host->ctrl_base) + return IRQ_HANDLED; + + spin_lock_irqsave(&msm_host->intr_lock, flags); + isr = dsi_read(msm_host, REG_DSI_INTR_CTRL); + dsi_write(msm_host, REG_DSI_INTR_CTRL, isr); + spin_unlock_irqrestore(&msm_host->intr_lock, flags); + + DBG("isr=0x%x, id=%d", isr, msm_host->id); + + if (isr & DSI_IRQ_ERROR) + dsi_error(msm_host); + + if (isr & DSI_IRQ_VIDEO_DONE) + complete(&msm_host->video_comp); + + if (isr & DSI_IRQ_CMD_DMA_DONE) + complete(&msm_host->dma_comp); + + return IRQ_HANDLED; +} + +static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, + struct device *panel_device) +{ + int ret; + + msm_host->disp_en_gpio = devm_gpiod_get(panel_device, + "disp-enable"); + if (IS_ERR(msm_host->disp_en_gpio)) { + DBG("cannot get disp-enable-gpios %ld", + PTR_ERR(msm_host->disp_en_gpio)); + msm_host->disp_en_gpio = NULL; + } + if (msm_host->disp_en_gpio) { + ret = gpiod_direction_output(msm_host->disp_en_gpio, 0); + if (ret) { + pr_err("cannot set dir to disp-en-gpios %d\n", ret); + return ret; + } + } + + msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te"); + if (IS_ERR(msm_host->te_gpio)) { + DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); + msm_host->te_gpio = NULL; + } + + if (msm_host->te_gpio) { + ret = gpiod_direction_input(msm_host->te_gpio); + if (ret) { + pr_err("%s: cannot set dir to disp-te-gpios, %d\n", + __func__, ret); + return ret; + } + } + + return 0; +} + +static int dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + msm_host->channel = dsi->channel; + msm_host->lanes = dsi->lanes; + msm_host->format = dsi->format; + msm_host->mode_flags = dsi->mode_flags; + + msm_host->panel_node = dsi->dev.of_node; + + /* Some gpios defined in panel DT need to be controlled by host */ + ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); + if (ret) + return ret; + + DBG("id=%d", msm_host->id); + if (msm_host->dev) + drm_helper_hpd_irq_event(msm_host->dev); + + return 0; +} + +static int dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + msm_host->panel_node = NULL; + + DBG("id=%d", msm_host->id); + if (msm_host->dev) + drm_helper_hpd_irq_event(msm_host->dev); + + return 0; +} + +static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + if (!msg || !msm_host->power_on) + return -EINVAL; + + mutex_lock(&msm_host->cmd_mutex); + ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg); + mutex_unlock(&msm_host->cmd_mutex); + + return ret; +} + +static struct mipi_dsi_host_ops dsi_host_ops = { + .attach = dsi_host_attach, + .detach = dsi_host_detach, + .transfer = dsi_host_transfer, +}; + +int msm_dsi_host_init(struct msm_dsi *msm_dsi) +{ + struct msm_dsi_host *msm_host = NULL; + struct platform_device *pdev = 
msm_dsi->pdev;
+	int ret;
+
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+	if (!msm_host) {
+		pr_err("%s: FAILED: cannot alloc dsi host\n",
+			__func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,dsi-host-index", &msm_host->id);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: host index not specified, ret=%d\n",
+			__func__, ret);
+		goto fail;
+	}
+	msm_host->pdev = pdev;
+
+	ret = dsi_clk_init(msm_host);
+	if (ret) {
+		pr_err("%s: unable to initialize dsi clks\n", __func__);
+		goto fail;
+	}
+
+	msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+	if (IS_ERR(msm_host->ctrl_base)) {
+		pr_err("%s: unable to map DSI ctrl base\n", __func__);
+		ret = PTR_ERR(msm_host->ctrl_base);
+		goto fail;
+	}
+
+	msm_host->cfg = dsi_get_config(msm_host);
+	if (!msm_host->cfg) {
+		ret = -EINVAL;
+		pr_err("%s: get config failed\n", __func__);
+		goto fail;
+	}
+
+	ret = dsi_regulator_init(msm_host);
+	if (ret) {
+		pr_err("%s: regulator init failed\n", __func__);
+		goto fail;
+	}
+
+	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+	if (!msm_host->rx_buf) {
+		pr_err("%s: alloc rx temp buf failed\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	init_completion(&msm_host->dma_comp);
+	init_completion(&msm_host->video_comp);
+	mutex_init(&msm_host->dev_mutex);
+	mutex_init(&msm_host->cmd_mutex);
+	mutex_init(&msm_host->clk_mutex);
+	spin_lock_init(&msm_host->intr_lock);
+
+	/* setup workqueue */
+	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+	msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
+					msm_host->id);
+	if (!msm_dsi->phy) {
+		ret = -EINVAL;
+		pr_err("%s: phy init failed\n", __func__);
+		goto fail;
+	}
+	msm_dsi->host = &msm_host->base;
+	msm_dsi->id = msm_host->id;
+
+	DBG("Dsi Host %d initialized", msm_host->id);
+	return 0;
+
+fail:
+	return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_tx_buf_free(msm_host);
+	if (msm_host->workqueue) {
+		flush_workqueue(msm_host->workqueue);
+		destroy_workqueue(msm_host->workqueue);
+		msm_host->workqueue = NULL;
+	}
+
+	mutex_destroy(&msm_host->clk_mutex);
+	mutex_destroy(&msm_host->cmd_mutex);
+	mutex_destroy(&msm_host->dev_mutex);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct platform_device *pdev = msm_host->pdev;
+	int ret;
+
+	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!msm_host->irq) {
+		/* irq_of_parse_and_map() returns 0 on failure */
+		dev_err(dev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(&pdev->dev, msm_host->irq,
+			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"dsi_isr", msm_host);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+				msm_host->irq, ret);
+		return ret;
+	}
+
+	msm_host->dev = dev;
+	ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+	if (ret) {
+		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct device_node *node;
+	int ret;
+
+	/* Register mipi dsi host */
+	if (!msm_host->registered) {
+		host->dev = &msm_host->pdev->dev;
+		host->ops = &dsi_host_ops;
+		ret = mipi_dsi_host_register(host);
+		if (ret)
+			return ret;
+
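+		/* With the host registered, a panel driver bound to the
+		 * "panel" child node can probe and call mipi_dsi_attach(),
+		 * which ends up in dsi_host_attach() above.
+		 */
+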
msm_host->registered = true; + + /* If the panel driver has not been probed after host register, + * we should defer the host's probe. + * It makes sure panel is connected when fbcon detects + * connector status and gets the proper display mode to + * create framebuffer. + */ + if (check_defer) { + node = of_get_child_by_name(msm_host->pdev->dev.of_node, + "panel"); + if (node) { + if (!of_drm_find_panel(node)) + return -EPROBE_DEFER; + } + } + } + + return 0; +} + +void msm_dsi_host_unregister(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (msm_host->registered) { + mipi_dsi_host_unregister(host); + host->dev = NULL; + host->ops = NULL; + msm_host->registered = false; + } +} + +int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + /* TODO: make sure dsi_cmd_mdp is idle. + * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME + * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed. + * How to handle the old versions? Wait for mdp cmd done? + */ + + /* + * mdss interrupt is generated in mdp core clock domain + * mdp clock need to be enabled to receive dsi interrupt + */ + dsi_clk_ctrl(msm_host, 1); + + /* TODO: vote for bus bandwidth */ + + if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) + dsi_set_tx_power_mode(0, msm_host); + + msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL); + dsi_write(msm_host, REG_DSI_CTRL, + msm_host->dma_cmd_ctrl_restore | + DSI_CTRL_CMD_MODE_EN | + DSI_CTRL_ENABLE); + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1); + + return 0; +} + +void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0); + dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore); + + if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) + dsi_set_tx_power_mode(1, msm_host); + + /* TODO: unvote for bus bandwidth */ + + dsi_clk_ctrl(msm_host, 0); +} + +int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + return dsi_cmds2buf_tx(msm_host, msg); +} + +int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int data_byte, rx_byte, dlen, end; + int short_response, diff, pkt_size, ret = 0; + char cmd; + int rlen = msg->rx_len; + u8 *buf; + + if (rlen <= 2) { + short_response = 1; + pkt_size = rlen; + rx_byte = 4; + } else { + short_response = 0; + data_byte = 10; /* first read */ + if (rlen < data_byte) + pkt_size = rlen; + else + pkt_size = data_byte; + rx_byte = data_byte + 6; /* 4 header + 2 crc */ + } + + buf = msm_host->rx_buf; + end = 0; + while (!end) { + u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8}; + struct mipi_dsi_msg max_pkt_size_msg = { + .channel = msg->channel, + .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, + .tx_len = 2, + .tx_buf = tx, + }; + + DBG("rlen=%d pkt_size=%d rx_byte=%d", + rlen, pkt_size, rx_byte); + + ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg); + if (ret < 2) { + pr_err("%s: Set max pkt size failed, %d\n", + __func__, ret); + return -EINVAL; + } + + if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && + (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { + /* Clear the RDBK_DATA registers */ + dsi_write(msm_host, 
REG_DSI_RDBK_DATA_CTRL,
+					DSI_RDBK_DATA_CTRL_CLR);
+			wmb(); /* make sure the RDBK registers are cleared */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+			wmb(); /* release cleared status before transfer */
+		}
+
+		ret = dsi_cmds2buf_tx(msm_host, msg);
+		if (ret < (int)msg->tx_len) {
+			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+			return ret;
+		}
+
+		/*
+		 * Once the cmd_dma_done interrupt is received, the response
+		 * data from the client is ready and already stored in the
+		 * RDBK_DATA registers. Since the rx fifo is 16 bytes, the dcs
+		 * header is kept only on the first loop; after that the dcs
+		 * header is lost during the shift into the registers.
+		 */
+		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+		if (dlen <= 0)
+			return 0;
+
+		if (short_response)
+			break;
+
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		if (!end) {
+			dlen -= 2; /* 2 crc */
+			dlen -= diff;
+			buf += dlen; /* next start position */
+			data_byte = 14; /* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+		}
+	}
+
+	/*
+	 * For a single long read, if the requested rlen < 10,
+	 * we need to shift the start position of the rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+	if (pkt_size < 10 && !short_response)
+		buf = msm_host->rx_buf + (10 - rlen);
+	else
+		buf = msm_host->rx_buf;
+
+	cmd = buf[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		ret = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		ret = dsi_short_read1_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		ret = dsi_short_read2_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		ret = dsi_long_read_resp(buf, msg);
+		break;
+	default:
+		pr_warn("%s: invalid response cmd\n", __func__);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+	/* Make sure trigger happens */
+	wmb();
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+	/* TODO: clock should be turned off for command mode,
+	 * and only turned on before MDP START.
+	 * This part of code should be enabled once the mdp driver supports it.
+	 */
+	/* if (msm_panel->mode == MSM_DSI_CMD_MODE)
+		dsi_clk_ctrl(msm_host, 0); */
+
+	return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+	/* Since we have disabled INTF, the video engine won't stop, and
+	 * the cmd engine will be blocked.
+	 * Reset to disable the video engine so that we can send off commands.
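+	 * (dsi_sw_reset() enables the core clocks first, since the
+	 * controller can only be reset while its clocks are running, then
+	 * pulses DSI_RESET with write barriers in between; the same
+	 * sequence appears in dsi_sw_reset_restore() above.)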
+ */
+	dsi_sw_reset(msm_host);
+
+	return 0;
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	u32 clk_pre = 0, clk_post = 0;
+	int ret = 0;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (msm_host->power_on) {
+		DBG("dsi host already on");
+		goto unlock_ret;
+	}
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		goto unlock_ret;
+	}
+
+	ret = dsi_host_regulator_enable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable vregs, ret=%d\n",
+			__func__, ret);
+		goto unlock_ret;
+	}
+
+	ret = dsi_bus_clk_enable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	dsi_phy_sw_reset(msm_host);
+	ret = msm_dsi_manager_phy_enable(msm_host->id,
+					msm_host->byte_clk_rate * 8,
+					clk_get_rate(msm_host->esc_clk),
+					&clk_pre, &clk_post);
+	dsi_bus_clk_disable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	ret = dsi_clk_ctrl(msm_host, 1);
+	if (ret) {
+		pr_err("%s: failed to enable clocks, ret=%d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	dsi_timing_setup(msm_host);
+	dsi_sw_reset(msm_host);
+	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+	msm_host->power_on = true;
+	mutex_unlock(&msm_host->dev_mutex);
+
+	return 0;
+
+fail_disable_reg:
+	dsi_host_regulator_disable(msm_host);
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (!msm_host->power_on) {
+		DBG("dsi host already off");
+		goto unlock_ret;
+	}
+
+	dsi_ctrl_config(msm_host, false, 0, 0);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+	msm_dsi_manager_phy_disable(msm_host->id);
+
+	dsi_clk_ctrl(msm_host, 0);
+
+	dsi_host_regulator_disable(msm_host);
+
+	DBG("-");
+
+	msm_host->power_on = false;
+
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+					struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->mode) {
+		drm_mode_destroy(msm_host->dev, msm_host->mode);
+		msm_host->mode = NULL;
+	}
+
+	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+	if (!msm_host->mode) {
+		/* drm_mode_duplicate() returns NULL on allocation failure */
+		pr_err("%s: cannot duplicate mode\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+					unsigned long *panel_flags)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct drm_panel *panel;
+
+	panel = of_drm_find_panel(msm_host->panel_node);
+	if (panel_flags)
+		*panel_flags = msm_host->mode_flags;
+
+	return panel;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 0000000..ee3ebca
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "msm_kms.h" +#include "dsi.h" + +struct msm_dsi_manager { + struct msm_dsi *dsi[DSI_MAX]; + + bool is_dual_panel; + bool is_sync_needed; + int master_panel_id; +}; + +static struct msm_dsi_manager msm_dsim_glb; + +#define IS_DUAL_PANEL() (msm_dsim_glb.is_dual_panel) +#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) +#define IS_MASTER_PANEL(id) (msm_dsim_glb.master_panel_id == id) + +static inline struct msm_dsi *dsi_mgr_get_dsi(int id) +{ + return msm_dsim_glb.dsi[id]; +} + +static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id) +{ + return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; +} + +static int dsi_mgr_parse_dual_panel(struct device_node *np, int id) +{ + struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; + + /* We assume 2 dsi nodes have the same information of dual-panel and + * sync-mode, and only one node specifies master in case of dual mode. + */ + if (!msm_dsim->is_dual_panel) + msm_dsim->is_dual_panel = of_property_read_bool( + np, "qcom,dual-panel-mode"); + + if (msm_dsim->is_dual_panel) { + if (of_property_read_bool(np, "qcom,master-panel")) + msm_dsim->master_panel_id = id; + if (!msm_dsim->is_sync_needed) + msm_dsim->is_sync_needed = of_property_read_bool( + np, "qcom,sync-dual-panel"); + } + + return 0; +} + +struct dsi_connector { + struct drm_connector base; + int id; +}; + +struct dsi_bridge { + struct drm_bridge base; + int id; +}; + +#define to_dsi_connector(x) container_of(x, struct dsi_connector, base) +#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base) + +static inline int dsi_mgr_connector_get_id(struct drm_connector *connector) +{ + struct dsi_connector *dsi_connector = to_dsi_connector(connector); + return dsi_connector->id; +} + +static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge) +{ + struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge); + return dsi_bridge->id; +} + +static enum drm_connector_status dsi_mgr_connector_detect( + struct drm_connector *connector, bool force) +{ + int id = dsi_mgr_connector_get_id(connector); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); + struct msm_drm_private *priv = connector->dev->dev_private; + struct msm_kms *kms = priv->kms; + + DBG("id=%d", id); + if (!msm_dsi->panel) { + msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host, + &msm_dsi->panel_flags); + + /* There is only 1 panel in the global panel list + * for dual panel mode. Therefore slave dsi should get + * the drm_panel instance from master dsi, and + * keep using the panel flags got from the current DSI link. 
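+		 * (For example, with DSI_0 as the master link, the DSI_1
+		 * connector ends up sharing DSI_0's drm_panel while keeping
+		 * the mode flags it read on its own link.)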
+ */ + if (!msm_dsi->panel && IS_DUAL_PANEL() && + !IS_MASTER_PANEL(id) && other_dsi) + msm_dsi->panel = msm_dsi_host_get_panel( + other_dsi->host, NULL); + + if (msm_dsi->panel && IS_DUAL_PANEL()) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.tile_property, 0); + + /* Set split display info to kms once dual panel is connected + * to both hosts + */ + if (msm_dsi->panel && IS_DUAL_PANEL() && + other_dsi && other_dsi->panel) { + bool cmd_mode = !(msm_dsi->panel_flags & + MIPI_DSI_MODE_VIDEO); + struct drm_encoder *encoder = msm_dsi_get_encoder( + dsi_mgr_get_dsi(DSI_ENCODER_MASTER)); + struct drm_encoder *slave_enc = msm_dsi_get_encoder( + dsi_mgr_get_dsi(DSI_ENCODER_SLAVE)); + + if (kms->funcs->set_split_display) + kms->funcs->set_split_display(kms, encoder, + slave_enc, cmd_mode); + else + pr_err("mdp does not support dual panel\n"); + } + } + + return msm_dsi->panel ? connector_status_connected : + connector_status_disconnected; +} + +static void dsi_mgr_connector_destroy(struct drm_connector *connector) +{ + DBG(""); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static void dsi_dual_connector_fix_modes(struct drm_connector *connector) +{ + struct drm_display_mode *mode, *m; + + /* Only support left-right mode */ + list_for_each_entry_safe(mode, m, &connector->probed_modes, head) { + mode->clock >>= 1; + mode->hdisplay >>= 1; + mode->hsync_start >>= 1; + mode->hsync_end >>= 1; + mode->htotal >>= 1; + drm_mode_set_name(mode); + } +} + +static int dsi_dual_connector_tile_init( + struct drm_connector *connector, int id) +{ + struct drm_display_mode *mode; + /* Fake topology id */ + char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'}; + + if (connector->tile_group) { + DBG("Tile property has been initialized"); + return 0; + } + + /* Use the first mode only for now */ + mode = list_first_entry(&connector->probed_modes, + struct drm_display_mode, + head); + if (!mode) + return -EINVAL; + + connector->tile_group = drm_mode_get_tile_group( + connector->dev, topo_id); + if (!connector->tile_group) + connector->tile_group = drm_mode_create_tile_group( + connector->dev, topo_id); + if (!connector->tile_group) { + pr_err("%s: failed to create tile group\n", __func__); + return -ENOMEM; + } + + connector->has_tile = true; + connector->tile_is_single_monitor = true; + + /* mode has been fixed */ + connector->tile_h_size = mode->hdisplay; + connector->tile_v_size = mode->vdisplay; + + /* Only support left-right mode */ + connector->num_h_tile = 2; + connector->num_v_tile = 1; + + connector->tile_v_loc = 0; + connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0; + + return 0; +} + +static int dsi_mgr_connector_get_modes(struct drm_connector *connector) +{ + int id = dsi_mgr_connector_get_id(connector); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_panel *panel = msm_dsi->panel; + int ret, num; + + if (!panel) + return 0; + + /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode, + * panel should not attach to any connector. + * Only temporarily attach panel to the current connector here, + * to let panel set mode to this connector. 
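+	 * (So the sequence below is attach -> get_modes -> detach, and the
+	 * panel is never left bound to either of the two connectors.)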
+ */ + drm_panel_attach(panel, connector); + num = drm_panel_get_modes(panel); + drm_panel_detach(panel); + if (!num) + return 0; + + if (IS_DUAL_PANEL()) { + /* report half resolution to user */ + dsi_dual_connector_fix_modes(connector); + ret = dsi_dual_connector_tile_init(connector, id); + if (ret) + return ret; + ret = drm_mode_connector_set_tile_property(connector); + if (ret) { + pr_err("%s: set tile property failed, %d\n", + __func__, ret); + return ret; + } + } + + return num; +} + +static int dsi_mgr_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int id = dsi_mgr_connector_get_id(connector); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi); + struct msm_drm_private *priv = connector->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + DBG(""); + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, requested, encoder); + + DBG("requested=%ld, actual=%ld", requested, actual); + if (actual != requested) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + +static struct drm_encoder * +dsi_mgr_connector_best_encoder(struct drm_connector *connector) +{ + int id = dsi_mgr_connector_get_id(connector); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + + DBG(""); + return msm_dsi_get_encoder(msm_dsi); +} + +static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); + struct mipi_dsi_host *host = msm_dsi->host; + struct drm_panel *panel = msm_dsi->panel; + bool is_dual_panel = IS_DUAL_PANEL(); + int ret; + + DBG("id=%d", id); + if (!panel || (is_dual_panel && (DSI_1 == id))) + return; + + ret = msm_dsi_host_power_on(host); + if (ret) { + pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); + goto host_on_fail; + } + + if (is_dual_panel && msm_dsi1) { + ret = msm_dsi_host_power_on(msm_dsi1->host); + if (ret) { + pr_err("%s: power on host1 failed, %d\n", + __func__, ret); + goto host1_on_fail; + } + } + + /* Always call panel functions once, because even for dual panels, + * there is only one drm_panel instance. 
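+	 * (In the dual panel case only the master's call path reaches this
+	 * point; the DSI_1 instance returned early above.)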
+ */ + ret = drm_panel_prepare(panel); + if (ret) { + pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret); + goto panel_prep_fail; + } + + ret = msm_dsi_host_enable(host); + if (ret) { + pr_err("%s: enable host %d failed, %d\n", __func__, id, ret); + goto host_en_fail; + } + + if (is_dual_panel && msm_dsi1) { + ret = msm_dsi_host_enable(msm_dsi1->host); + if (ret) { + pr_err("%s: enable host1 failed, %d\n", __func__, ret); + goto host1_en_fail; + } + } + + ret = drm_panel_enable(panel); + if (ret) { + pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret); + goto panel_en_fail; + } + + return; + +panel_en_fail: + if (is_dual_panel && msm_dsi1) + msm_dsi_host_disable(msm_dsi1->host); +host1_en_fail: + msm_dsi_host_disable(host); +host_en_fail: + drm_panel_unprepare(panel); +panel_prep_fail: + if (is_dual_panel && msm_dsi1) + msm_dsi_host_power_off(msm_dsi1->host); +host1_on_fail: + msm_dsi_host_power_off(host); +host_on_fail: + return; +} + +static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) +{ + DBG(""); +} + +static void dsi_mgr_bridge_disable(struct drm_bridge *bridge) +{ + DBG(""); +} + +static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); + struct mipi_dsi_host *host = msm_dsi->host; + struct drm_panel *panel = msm_dsi->panel; + bool is_dual_panel = IS_DUAL_PANEL(); + int ret; + + DBG("id=%d", id); + + if (!panel || (is_dual_panel && (DSI_1 == id))) + return; + + ret = drm_panel_disable(panel); + if (ret) + pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret); + + ret = msm_dsi_host_disable(host); + if (ret) + pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); + + if (is_dual_panel && msm_dsi1) { + ret = msm_dsi_host_disable(msm_dsi1->host); + if (ret) + pr_err("%s: host1 disable failed, %d\n", __func__, ret); + } + + ret = drm_panel_unprepare(panel); + if (ret) + pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret); + + ret = msm_dsi_host_power_off(host); + if (ret) + pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); + + if (is_dual_panel && msm_dsi1) { + ret = msm_dsi_host_power_off(msm_dsi1->host); + if (ret) + pr_err("%s: host1 power off failed, %d\n", + __func__, ret); + } +} + +static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); + struct mipi_dsi_host *host = msm_dsi->host; + bool is_dual_panel = IS_DUAL_PANEL(); + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + if (is_dual_panel && (DSI_1 == id)) + return; + + msm_dsi_host_set_display_mode(host, adjusted_mode); + if (is_dual_panel && other_dsi) + msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); +} + +static const struct drm_connector_funcs dsi_mgr_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = dsi_mgr_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = dsi_mgr_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + 
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = { + .get_modes = dsi_mgr_connector_get_modes, + .mode_valid = dsi_mgr_connector_mode_valid, + .best_encoder = dsi_mgr_connector_best_encoder, +}; + +static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { + .pre_enable = dsi_mgr_bridge_pre_enable, + .enable = dsi_mgr_bridge_enable, + .disable = dsi_mgr_bridge_disable, + .post_disable = dsi_mgr_bridge_post_disable, + .mode_set = dsi_mgr_bridge_mode_set, +}; + +/* initialize connector */ +struct drm_connector *msm_dsi_manager_connector_init(u8 id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_connector *connector = NULL; + struct dsi_connector *dsi_connector; + int ret; + + dsi_connector = devm_kzalloc(msm_dsi->dev->dev, + sizeof(*dsi_connector), GFP_KERNEL); + if (!dsi_connector) { + ret = -ENOMEM; + goto fail; + } + + dsi_connector->id = id; + + connector = &dsi_connector->base; + + ret = drm_connector_init(msm_dsi->dev, connector, + &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI); + if (ret) + goto fail; + + drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs); + + /* Enable HPD to let hpd event is handled + * when panel is attached to the host. + */ + connector->polled = DRM_CONNECTOR_POLL_HPD; + + /* Display driver doesn't support interlace now. */ + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + ret = drm_connector_register(connector); + if (ret) + goto fail; + + return connector; + +fail: + if (connector) + dsi_mgr_connector_destroy(connector); + + return ERR_PTR(ret); +} + +/* initialize bridge */ +struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_bridge *bridge = NULL; + struct dsi_bridge *dsi_bridge; + int ret; + + dsi_bridge = devm_kzalloc(msm_dsi->dev->dev, + sizeof(*dsi_bridge), GFP_KERNEL); + if (!dsi_bridge) { + ret = -ENOMEM; + goto fail; + } + + dsi_bridge->id = id; + + bridge = &dsi_bridge->base; + bridge->funcs = &dsi_mgr_bridge_funcs; + + ret = drm_bridge_attach(msm_dsi->dev, bridge); + if (ret) + goto fail; + + return bridge; + +fail: + if (bridge) + msm_dsi_manager_bridge_destroy(bridge); + + return ERR_PTR(ret); +} + +void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) +{ +} + +int msm_dsi_manager_phy_enable(int id, + const unsigned long bit_rate, const unsigned long esc_rate, + u32 *clk_pre, u32 *clk_post) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi_phy *phy = msm_dsi->phy; + int ret; + + ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate); + if (ret) + return ret; + + msm_dsi->phy_enabled = true; + msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); + + return 0; +} + +void msm_dsi_manager_phy_disable(int id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + struct msm_dsi_phy *phy = msm_dsi->phy; + + /* disable DSI phy + * In dual-dsi configuration, the phy should be disabled for the + * first controller only when the second controller is disabled. 
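+	 * Both phy_enabled flags must therefore be false before either
+	 * PHY is actually powered down, which is what the check below
+	 * implements.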
+ */
+	msm_dsi->phy_enabled = false;
+	if (IS_DUAL_PANEL() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_phy_disable(sdsi->phy);
+			msm_dsi_phy_disable(mdsi->phy);
+		}
+	} else {
+		msm_dsi_phy_disable(phy);
+	}
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_read = (msg->rx_buf && msg->rx_len);
+	bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+	int ret;
+
+	if (!msg->tx_buf || !msg->tx_len)
+		return 0;
+
+	/* In the dual master case, the panel requires the same commands to
+	 * be sent to both DSI links. The host issues the command trigger to
+	 * both links when DSI_1 calls the cmd transfer function, regardless
+	 * of whether that happens before or after the DSI_0 cmd transfer.
+	 */
+	if (need_sync && (id == DSI_0))
+		return is_read ? msg->rx_len : msg->tx_len;
+
+	if (need_sync && msm_dsi0) {
+		ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+		if (ret) {
+			pr_err("%s: failed to prepare non-trigger host, %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+	ret = msm_dsi_host_xfer_prepare(host, msg);
+	if (ret) {
+		pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+		goto restore_host0;
+	}
+
+	ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+			msm_dsi_host_cmd_tx(host, msg);
+
+	msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+	if (need_sync && msm_dsi0)
+		msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+	return ret;
+}
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+
+	if (IS_SYNC_NEEDED() && (id == DSI_0))
+		return false;
+
+	if (IS_SYNC_NEEDED() && msm_dsi0)
+		msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+
+	msm_dsi_host_cmd_xfer_commit(host, iova, len);
+
+	return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+	int id = msm_dsi->id;
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	int ret;
+
+	if (id >= DSI_MAX) {
+		pr_err("%s: invalid id %d\n", __func__, id);
+		return -EINVAL;
+	}
+
+	if (msm_dsim->dsi[id]) {
+		pr_err("%s: dsi%d already registered\n", __func__, id);
+		return -EBUSY;
+	}
+
+	msm_dsim->dsi[id] = msm_dsi;
+
+	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+	if (ret) {
+		pr_err("%s: failed to parse dual panel info\n", __func__);
+		return ret;
+	}
+
+	if (!IS_DUAL_PANEL()) {
+		ret = msm_dsi_host_register(msm_dsi->host, true);
+	} else if (!other_dsi) {
+		return 0;
+	} else {
+		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+					msm_dsi : other_dsi;
+		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+					other_dsi : msm_dsi;
+		/* Register the slave host first, so that the slave DSI device
+		 * has a chance to probe without blocking the master
+		 * DSI device's probe.
+		 * Also, do not check defer for the slave host, because only
+		 * the master DSI device adds the panel to the global panel
+		 * list. The panel's device is the master DSI device.
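+		 * Hence the ordering below: sdsi->host is registered first
+		 * with check_defer=false, then mdsi->host with
+		 * check_defer=true.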
+ */ + ret = msm_dsi_host_register(sdsi->host, false); + if (ret) + return ret; + ret = msm_dsi_host_register(mdsi->host, true); + } + + return ret; +} + +void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi) +{ + struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; + + if (msm_dsi->host) + msm_dsi_host_unregister(msm_dsi->host); + msm_dsim->dsi[msm_dsi->id] = NULL; +} + diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c new file mode 100644 index 0000000..f0cea89 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi.h" +#include "dsi.xml.h" + +#define dsi_phy_read(offset) msm_readl((offset)) +#define dsi_phy_write(offset, data) msm_writel((data), (offset)) + +struct dsi_dphy_timing { + u32 clk_pre; + u32 clk_post; + u32 clk_zero; + u32 clk_trail; + u32 clk_prepare; + u32 hs_exit; + u32 hs_zero; + u32 hs_prepare; + u32 hs_trail; + u32 hs_rqst; + u32 ta_go; + u32 ta_sure; + u32 ta_get; +}; + +struct msm_dsi_phy { + void __iomem *base; + void __iomem *reg_base; + int id; + struct dsi_dphy_timing timing; + int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel, + const unsigned long bit_rate, const unsigned long esc_rate); + int (*disable)(struct msm_dsi_phy *phy); +}; + +#define S_DIV_ROUND_UP(n, d) \ + (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) + +static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, + s32 min_result, bool even) +{ + s32 v; + v = (tmax - tmin) * percent; + v = S_DIV_ROUND_UP(v, 100) + tmin; + if (even && (v & 0x1)) + return max_t(s32, min_result, v - 1); + else + return max_t(s32, min_result, v); +} + +static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing, + s32 ui, s32 coeff, s32 pcnt) +{ + s32 tmax, tmin, clk_z; + s32 temp; + + /* reset */ + temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + if (tmin > 255) { + tmax = 511; + clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true); + } else { + tmax = 255; + clk_z = linear_inter(tmax, tmin, pcnt, 0, true); + } + + /* adjust */ + temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7; + timing->clk_zero = clk_z + 8 - temp; +} + +static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + s32 ui, lpx; + s32 tmax, tmin; + s32 pcnt0 = 10; + s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10; + s32 pcnt2 = 10; + s32 pcnt3 = (bit_rate > 180000000) ? 
10 : 40; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2; + tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2; + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true); + + temp = lpx / ui; + if (temp & 0x1) + timing->hs_rqst = temp; + else + timing->hs_rqst = max_t(s32, 0, temp - 2); + + /* Calculate clk_zero after clk_prepare and hs_rqst */ + dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + temp = 85 * coeff + 6 * ui; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 40 * coeff + 4 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true); + + tmax = 255; + temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui; + temp = 145 * coeff + 10 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 60 * coeff + 4 * ui; + tmin = DIV_ROUND_UP(temp, ui) - 2; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + tmax = 255; + tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true); + + tmax = 63; + temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; + temp = 60 * coeff + 52 * ui - 24 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); + + tmax = 63; + temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; + temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; + temp += 8 * ui + lpx; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + if (tmin > tmax) { + temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1; + timing->clk_pre = temp >> 1; + temp = (2 * tmax - tmin) * pcnt2; + } else { + timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->clk_pre, timing->clk_post, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst); + + return 0; +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); +} + +static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + struct dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + + DBG(""); + 
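+	/* The values programmed below come from dsi_dphy_timing_calc(),
+	 * which interpolates between per-parameter min/max bounds; e.g.
+	 * linear_inter(255, 38, 10, 0, true) computes
+	 * 38 + DIV_ROUND_UP((255 - 38) * 10, 100) = 60, which is already
+	 * even and >= 0, so it is returned unchanged.
+	 */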
+ if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + pr_err("%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff); + + dsi_28nm_phy_regulator_ctrl(phy, true); + + dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); + + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, + DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, + DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, + DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + if (timing->clk_zero & BIT(8)) + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, + DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, + DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, + DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, + DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, + DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, + DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, + DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, + DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, + DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6); + + for (i = 0; i < 4; i++) { + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97); + } + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa); + dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf); + + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb); + + dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f); + + if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER)) + dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00); + else + dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01); + + return 0; +} + +static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0); + dsi_28nm_phy_regulator_ctrl(phy, false); + + /* + * Wait for the registers writes to complete in order to + * ensure that the phy is completely disabled + */ + wmb(); + + return 0; +} + +#define dsi_phy_func_init(name) \ + do { \ + phy->enable = dsi_##name##_phy_enable; \ + phy->disable = dsi_##name##_phy_disable; \ + } while (0) + 
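+/* For reference, dsi_phy_func_init(28nm) expands to:
+ *	phy->enable = dsi_28nm_phy_enable;
+ *	phy->disable = dsi_28nm_phy_disable;
+ */
+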
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev, + enum msm_dsi_phy_type type, int id) +{ + struct msm_dsi_phy *phy; + + phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return NULL; + + phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(phy->base)) { + pr_err("%s: failed to map phy base\n", __func__); + return NULL; + } + phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG"); + if (IS_ERR_OR_NULL(phy->reg_base)) { + pr_err("%s: failed to map phy regulator base\n", __func__); + return NULL; + } + + switch (type) { + case MSM_DSI_PHY_28NM: + dsi_phy_func_init(28nm); + break; + default: + pr_err("%s: unsupported type, %d\n", __func__, type); + return NULL; + } + + phy->id = id; + + return phy; +} + +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel, + const unsigned long bit_rate, const unsigned long esc_rate) +{ + if (!phy || !phy->enable) + return -EINVAL; + return phy->enable(phy, is_dual_panel, bit_rate, esc_rate); +} + +int msm_dsi_phy_disable(struct msm_dsi_phy *phy) +{ + if (!phy || !phy->disable) + return -EINVAL; + return phy->disable(phy); +} + +void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, + u32 *clk_pre, u32 *clk_post) +{ + if (!phy) + return; + if (clk_pre) + *clk_pre = phy->timing.clk_pre; + if (clk_post) + *clk_post = phy->timing.clk_post; +} + diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index eeed006..6997ec6 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -53,6 +53,23 @@ struct pll_rate { /* NOTE: keep sorted highest freq to lowest: */ static const struct pll_rate freqtbl[] = { + { 154000000, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0, 0 } } + }, /* 1080p60/1080p50 case */ { 148500000, { { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, @@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = { { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, { 0, 0 } } }, + { 74176000, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0, 0 } } + }, { 65000000, { { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index c276624..b9a4ded 100644 --- 
a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) +- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48) - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) -- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) +- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49) Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -enum mdp5_intf { +enum mdp5_intf_type { + INTF_DISABLED = 0, INTF_DSI = 1, INTF_HDMI = 3, INTF_LCDC = 5, INTF_eDP = 9, + INTF_VIRTUAL = 100, + INTF_WB = 101, }; enum mdp5_intfnum { @@ -67,11 +70,11 @@ enum mdp5_pipe { enum mdp5_ctl_mode { MODE_NONE = 0, - MODE_ROT0 = 1, - MODE_ROT1 = 2, - MODE_WB0 = 3, - MODE_WB1 = 4, - MODE_WFD = 5, + MODE_WB_0_BLOCK = 1, + MODE_WB_1_BLOCK = 2, + MODE_WB_0_LINE = 3, + MODE_WB_1_LINE = 4, + MODE_WB_2_LINE = 5, }; enum mdp5_pack_3d { @@ -94,33 +97,6 @@ enum mdp5_pipe_bwc { BWC_Q_MED = 2, }; -enum mdp5_client_id { - CID_UNUSED = 0, - CID_VIG0_Y = 1, - CID_VIG0_CR = 2, - CID_VIG0_CB = 3, - CID_VIG1_Y = 4, - CID_VIG1_CR = 5, - CID_VIG1_CB = 6, - CID_VIG2_Y = 7, - CID_VIG2_CR = 8, - CID_VIG2_CB = 9, - CID_DMA0_Y = 10, - CID_DMA0_CR = 11, - CID_DMA0_CB = 12, - CID_DMA1_Y = 13, - CID_DMA1_CR = 14, - CID_DMA1_CB = 15, - CID_RGB0 = 16, - CID_RGB1 = 17, - CID_RGB2 = 18, - CID_VIG3_Y = 19, - CID_VIG3_CR = 20, - CID_VIG3_CB = 21, - CID_RGB3 = 22, - CID_MAX = 23, -}; - enum mdp5_cursor_format { CURSOR_FMT_ARGB8888 = 0, CURSOR_FMT_ARGB1555 = 2, @@ -144,30 +120,25 @@ enum mdp5_data_format { DATA_FORMAT_YUV = 1, }; -#define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 -#define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 -#define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 -#define MDP5_IRQ_INTF3_WB_ROT_COMP 0x00000008 -#define MDP5_IRQ_INTF0_WB_WFD 0x00000010 -#define MDP5_IRQ_INTF1_WB_WFD 0x00000020 -#define MDP5_IRQ_INTF2_WB_WFD 0x00000040 -#define MDP5_IRQ_INTF3_WB_WFD 0x00000080 -#define MDP5_IRQ_INTF0_PING_PONG_COMP 0x00000100 -#define MDP5_IRQ_INTF1_PING_PONG_COMP 0x00000200 -#define MDP5_IRQ_INTF2_PING_PONG_COMP 0x00000400 -#define MDP5_IRQ_INTF3_PING_PONG_COMP 0x00000800 -#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR 0x00001000 -#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR 0x00002000 -#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR 0x00004000 -#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR 0x00008000 -#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR 0x00010000 -#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR 0x00020000 -#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR 0x00040000 -#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR 0x00080000 -#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF 0x00100000 -#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF 0x00200000 -#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF 0x00400000 -#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF 0x00800000 +#define MDP5_IRQ_WB_0_DONE 0x00000001 +#define MDP5_IRQ_WB_1_DONE 0x00000002 +#define MDP5_IRQ_WB_2_DONE 0x00000010 +#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100 +#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200 +#define 
MDP5_IRQ_PING_PONG_2_DONE 0x00000400 +#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800 +#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000 +#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000 +#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000 +#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000 +#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000 +#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000 +#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000 +#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000 +#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000 +#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000 +#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000 +#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000 #define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 #define MDP5_IRQ_INTF0_VSYNC 0x02000000 #define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 @@ -176,136 +147,186 @@ enum mdp5_data_format { #define MDP5_IRQ_INTF2_VSYNC 0x20000000 #define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 #define MDP5_IRQ_INTF3_VSYNC 0x80000000 -#define REG_MDP5_HW_VERSION 0x00000000 +#define REG_MDSS_HW_VERSION 0x00000000 +#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff +#define MDSS_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK; +} +#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDSS_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK; +} +#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDSS_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK; +} + +#define REG_MDSS_HW_INTR_STATUS 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001 +#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020 +#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 +#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 -#define REG_MDP5_HW_INTR_STATUS 0x00000010 -#define MDP5_HW_INTR_STATUS_INTR_MDP 0x00000001 -#define MDP5_HW_INTR_STATUS_INTR_DSI0 0x00000010 -#define MDP5_HW_INTR_STATUS_INTR_DSI1 0x00000020 -#define MDP5_HW_INTR_STATUS_INTR_HDMI 0x00000100 -#define MDP5_HW_INTR_STATUS_INTR_EDP 0x00001000 +static inline uint32_t __offset_MDP(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->mdp.base[0]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); } -#define REG_MDP5_MDP_VERSION 0x00000100 -#define MDP5_MDP_VERSION_MINOR__MASK 0x00ff0000 -#define MDP5_MDP_VERSION_MINOR__SHIFT 16 -static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val) +static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); } +#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff +#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val) { - return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK; + return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK; } -#define MDP5_MDP_VERSION_MAJOR__MASK 0xf0000000 -#define MDP5_MDP_VERSION_MAJOR__SHIFT 28 -static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val) +#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val) { - return ((val) << 
MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK; + return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK; +} +#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK; } -#define REG_MDP5_DISP_INTF_SEL 0x00000104 -#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff -#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val) +static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); } +#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff +#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0 +static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) { - return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; + return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; } -#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 -#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val) +#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 +#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8 +static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) { - return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; + return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; } -#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 -#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val) +#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 +#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16 +static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) { - return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; + return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; } -#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 -#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 -static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val) +#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000 +#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24 +static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) { - return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; + return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; } -#define REG_MDP5_INTR_EN 0x00000110 +static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); } -#define REG_MDP5_INTR_STATUS 0x00000114 +static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); } -#define REG_MDP5_INTR_CLEAR 0x00000118 +static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); } -#define REG_MDP5_HIST_INTR_EN 0x0000011c +static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); } -#define REG_MDP5_HIST_INTR_STATUS 0x00000120 +static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); } -#define REG_MDP5_HIST_INTR_CLEAR 0x00000124 +static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); } 
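Note how the helpers above turn fixed register addresses into functions: each REG_MDP5_MDP_*() adds a per-SoC base fetched from mdp5_cfg through __offset_MDP(), so one generated header serves chips whose MDP sub-block sits at different offsets (the mdp5_cfg.c hunk further down sets .mdp.base[0] to 0x00100 on msm8x74 and 0x01000 on msm8x16). A self-contained sketch of that indirection, with a hypothetical trimmed-down config in place of struct mdp5_cfg_hw:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical pared-down stand-in for the .mdp sub-block of the HW config. */
struct cfg { uint32_t mdp_base[1]; };

static const struct cfg msm8x74_cfg = { .mdp_base = { 0x00100 } };
static const struct cfg msm8x16_cfg = { .mdp_base = { 0x01000 } };
static const struct cfg *mdp5_cfg;	/* set once the chip is probed */

/* Same shape as the generated helpers: per-SoC base + fixed register offset. */
static inline uint32_t __offset_MDP(uint32_t idx)
{
	return mdp5_cfg->mdp_base[idx];
}
static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0)
{
	return 0x00000010 + __offset_MDP(i0);
}

int main(void)
{
	mdp5_cfg = &msm8x74_cfg;
	printf("msm8x74: INTR_EN at 0x%05" PRIx32 "\n", REG_MDP5_MDP_INTR_EN(0)); /* 0x00110 */

	mdp5_cfg = &msm8x16_cfg;
	printf("msm8x16: INTR_EN at 0x%05" PRIx32 "\n", REG_MDP5_MDP_INTR_EN(0)); /* 0x01010 */
	return 0;
}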
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; } +static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); } +#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 -static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; } -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val) +static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; } -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val) +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; } -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val) +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; } -static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; } +static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } -static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; } -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff -#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val) +static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } +#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff +#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK; } -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 -#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val) +#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 +#define 
MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK; } -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 -#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 -static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val) +#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) { - return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; + return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK; } static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) { switch (idx) { - case IGC_VIG: return 0x00000300; - case IGC_RGB: return 0x00000310; - case IGC_DMA: return 0x00000320; - case IGC_DSPP: return 0x00000400; + case IGC_VIG: return 0x00000200; + case IGC_RGB: return 0x00000210; + case IGC_DMA: return 0x00000220; + case IGC_DSPP: return 0x00000300; default: return INVALID_IDX(idx); } } -static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } +static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); } -static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } +static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } -static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } -#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff -#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 -static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) +static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } +#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff +#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0 +static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val) { - return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; + return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK; } -#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 -#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 +#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000 +#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 +#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 +#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 + +#define REG_MDP5_SPLIT_DPL_EN 0x000003f4 + +#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 +#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 + +#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 +#define 
MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 static inline uint32_t __offset_CTL(uint32_t idx) { @@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o #define MDP5_CTL_FLUSH_DSPP0 0x00002000 #define MDP5_CTL_FLUSH_DSPP1 0x00004000 #define MDP5_CTL_FLUSH_DSPP2 0x00008000 +#define MDP5_CTL_FLUSH_WB 0x00010000 #define MDP5_CTL_FLUSH_CTL 0x00020000 #define MDP5_CTL_FLUSH_VIG3 0x00040000 #define MDP5_CTL_FLUSH_RGB3 0x00080000 #define MDP5_CTL_FLUSH_LM5 0x00100000 #define MDP5_CTL_FLUSH_DSPP3 0x00200000 +#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000 +#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000 +#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000 +#define MDP5_CTL_FLUSH_TIMING_3 0x10000000 +#define MDP5_CTL_FLUSH_TIMING_2 0x20000000 +#define MDP5_CTL_FLUSH_TIMING_1 0x40000000 +#define MDP5_CTL_FLUSH_TIMING_0 0x80000000 static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } @@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } +static inline uint32_t __offset_PP(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->pp.base[0]); + case 1: return (mdp5_cfg->pp.base[1]); + case 2: return (mdp5_cfg->pp.base[2]); + case 3: return (mdp5_cfg->pp.base[3]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); } +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK; +} +#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000 +#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000 + +static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); } +#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff +#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK; +} +#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000 +#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16 +static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK; +} + +static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); } +#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff +#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0 +static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK; +} +#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000 +#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16 
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val) +{ + return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK; +} + +static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); } +#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff +#define MDP5_PP_SYNC_THRESH_START__SHIFT 0 +static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK; +} +#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000 +#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16 +static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val) +{ + return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK; +} + +static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); } + +static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); } + static inline uint32_t __offset_INTF(uint32_t idx) { switch (idx) { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index b0a4431..e001e6b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL; const struct mdp5_cfg_hw msm8x74_config = { .name = "msm8x74", + .mdp = { + .count = 1, + .base = { 0x00100 }, + }, .smp = { .mmb_count = 22, .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, }, .ctl = { .count = 5, .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .flush_hw_mask = 0x0003ffff, }, .pipe_vig = { .count = 3, @@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = { .count = 2, .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */ }, + .pp = { + .count = 3, + .base = { 0x12d00, 0x12e00, 0x12f00 }, + }, .intf = { .count = 4, .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, }, + .intfs = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, .max_clk = 200000000, }; const struct mdp5_cfg_hw apq8084_config = { .name = "apq8084", + .mdp = { + .count = 1, + .base = { 0x00100 }, + }, .smp = { .mmb_count = 44, .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ - .reserved[CID_RGB0] = 2, - .reserved[CID_RGB1] = 2, - .reserved[CID_RGB2] = 2, - .reserved[CID_RGB3] = 2, + .reserved = { + /* Two SMP blocks are statically tied to RGB pipes: */ + [16] = 2, [17] = 2, [18] = 2, [22] = 2, + }, }, .ctl = { .count = 5, .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, + .flush_hw_mask = 0x003fffff, }, .pipe_vig = { .count = 4, @@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = { .count = 3, .base = { 0x13500, 0x13700, 0x13900 }, }, + .pp = { + .count = 4, + .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, + }, .intf = { .count = 5, .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, }, + .intfs = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + .max_clk = 320000000, +}; + +const struct mdp5_cfg_hw msm8x16_config = { + .name = "msm8x16", + .mdp = { + .count = 1, + .base = { 0x01000 }, + }, + .smp = { + .mmb_count = 8, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .ctl = { + .count = 5, + .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, + .flush_hw_mask = 0x4003ffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x05000 }, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x15000, 0x17000 }, + }, + .pipe_dma = { + .count = 1, + .base = { 0x25000 }, + }, + .lm = { + .count = 2, /* LM0 and LM3 */ + .base = { 0x45000, 0x48000 }, + .nb_stages = 5, + }, + .dspp = { + .count = 1, + .base = { 0x55000 }, + + }, + .intf = { + .count = 1, /* INTF_1 */ + .base = { 0x6B800 }, + }, + /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */ .max_clk = 320000000, }; @@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = { { .revision = 0, .config = { .hw = &msm8x74_config } }, { .revision = 2, .config = { .hw = &msm8x74_config } }, { .revision = 3, .config = { .hw = &apq8084_config } }, + { .revision = 6, .config = { .hw = &msm8x16_config } }, }; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 
dba4d52..3a551b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,26 +44,38 @@ struct mdp5_lm_block {
 	uint32_t nb_stages;		/* number of stages per blender */
 };
 
+struct mdp5_ctl_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t flush_hw_mask;	/* FLUSH register's hardware mask */
+};
+
 struct mdp5_smp_block {
 	int mmb_count;			/* number of SMP MMBs */
 	int mmb_size;			/* MMB: size in bytes */
+	uint32_t clients[MAX_CLIENTS];	/* SMP port allocation per pipe */
 	mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
 	int reserved[MAX_CLIENTS];	/* # of MMBs allocated per client */
 };
 
+#define MDP5_INTF_NUM_MAX 5
+
 struct mdp5_cfg_hw {
 	char *name;
 
+	struct mdp5_sub_block mdp;
 	struct mdp5_smp_block smp;
-	struct mdp5_sub_block ctl;
+	struct mdp5_ctl_block ctl;
 	struct mdp5_sub_block pipe_vig;
 	struct mdp5_sub_block pipe_rgb;
 	struct mdp5_sub_block pipe_dma;
 	struct mdp5_lm_block lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
+	struct mdp5_sub_block pp;
 	struct mdp5_sub_block intf;
+
+	u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+
 	uint32_t max_clk;
 };
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
 struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
 int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
 
+#define mdp5_cfg_intf_is_virtual(intf_type) ({	\
+	typeof(intf_type) __val = (intf_type);	\
+	(__val) >= INTF_VIRTUAL ? true : false; })
+
 struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 		uint32_t major, uint32_t minor);
 void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 0000000..e4e8956
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include "mdp5_kms.h" + +#include "drm_crtc.h" +#include "drm_crtc_helper.h" + +struct mdp5_cmd_encoder { + struct drm_encoder base; + struct mdp5_interface intf; + bool enabled; + uint32_t bsc; +}; +#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) + +static struct mdp5_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +#ifdef CONFIG_MSM_BUS_SCALING +#include <mach/board.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ + { \ + .src = MSM_BUS_MASTER_MDP_PORT0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (ab_val), \ + .ib = (ib_val), \ + } + +static struct msm_bus_vectors mdp_bus_vectors[] = { + MDP_BUS_VECTOR_ENTRY(0, 0), + MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), +}; +static struct msm_bus_paths mdp_bus_usecases[] = { { + .num_paths = 1, + .vectors = &mdp_bus_vectors[0], +}, { + .num_paths = 1, + .vectors = &mdp_bus_vectors[1], +} }; +static struct msm_bus_scale_pdata mdp_bus_scale_table = { + .usecase = mdp_bus_usecases, + .num_usecases = ARRAY_SIZE(mdp_bus_usecases), + .name = "mdss_mdp", +}; + +static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) +{ + mdp5_cmd_enc->bsc = msm_bus_scale_register_client( + &mdp_bus_scale_table); + DBG("bus scale client: %08x", mdp5_cmd_enc->bsc); +} + +static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) +{ + if (mdp5_cmd_enc->bsc) { + msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc); + mdp5_cmd_enc->bsc = 0; + } +} + +static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) +{ + if (mdp5_cmd_enc->bsc) { + DBG("set bus scaling: %d", idx); + /* HACK: scaling down, and then immediately back up + * seems to leave things broken (underflow).. 
so + * never disable: + */ + idx = 1; + msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx); + } +} +#else +static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} +static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} +static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {} +#endif + +#define VSYNC_CLK_RATE 19200000 +static int pingpong_tearcheck_setup(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct device *dev = encoder->dev->dev; + u32 total_lines_x100, vclks_line, cfg; + long vsync_clk_speed; + int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + + if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { + dev_err(dev, "vsync_clk is not initialized\n"); + return -EINVAL; + } + + total_lines_x100 = mode->vtotal * mode->vrefresh; + if (!total_lines_x100) { + dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n", + __func__, mode->vtotal, mode->vrefresh); + return -EINVAL; + } + + vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE); + if (vsync_clk_speed <= 0) { + dev_err(dev, "vsync_clk round rate failed %ld\n", + vsync_clk_speed); + return -EINVAL; + } + vclks_line = vsync_clk_speed * 100 / total_lines_x100; + + cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN + | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; + cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); + + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); + mdp5_write(mdp5_kms, + REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); + mdp5_write(mdp5_kms, + REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); + mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id), + MDP5_PP_SYNC_THRESH_START(4) | + MDP5_PP_SYNC_THRESH_CONTINUE(4)); + + return 0; +} + +static int pingpong_tearcheck_enable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + int ret; + + ret = clk_set_rate(mdp5_kms->vsync_clk, + clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE)); + if (ret) { + dev_err(encoder->dev->dev, + "vsync_clk clk_set_rate failed, %d\n", ret); + return ret; + } + ret = clk_prepare_enable(mdp5_kms->vsync_clk); + if (ret) { + dev_err(encoder->dev->dev, + "vsync_clk clk_prepare_enable failed, %d\n", ret); + return ret; + } + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1); + + return 0; +} + +static void pingpong_tearcheck_disable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); + clk_disable_unprepare(mdp5_kms->vsync_clk); +} + +static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + bs_fini(mdp5_cmd_enc); + drm_encoder_cleanup(encoder); + kfree(mdp5_cmd_enc); +} + +static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = { + .destroy = mdp5_cmd_encoder_destroy, +}; + +static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_cmd_encoder *mdp5_cmd_enc = 
to_mdp5_cmd_encoder(encoder); + + mode = adjusted_mode; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + pingpong_tearcheck_setup(encoder, mode); + mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf); +} + +static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_interface *intf = &mdp5_cmd_enc->intf; + int lm = mdp5_crtc_get_lm(encoder->crtc); + + if (WARN_ON(!mdp5_cmd_enc->enabled)) + return; + + /* Wait for the last frame done */ + mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm)); + pingpong_tearcheck_disable(encoder); + + mdp5_ctl_set_encoder_state(ctl, false); + mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + + bs_set(mdp5_cmd_enc, 0); + + mdp5_cmd_enc->enabled = false; +} + +static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_interface *intf = &mdp5_cmd_enc->intf; + + if (WARN_ON(mdp5_cmd_enc->enabled)) + return; + + bs_set(mdp5_cmd_enc, 1); + if (pingpong_tearcheck_enable(encoder)) + return; + + mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + + mdp5_ctl_set_encoder_state(ctl, true); + + mdp5_cmd_enc->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = { + .mode_fixup = mdp5_cmd_encoder_mode_fixup, + .mode_set = mdp5_cmd_encoder_mode_set, + .disable = mdp5_cmd_encoder_disable, + .enable = mdp5_cmd_encoder_enable, +}; + +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_kms *mdp5_kms; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_cmd_enc->intf.num; + + /* Switch slave encoder's trigger MUX, to use the master's + * start signal for the slave encoder + */ + if (intf_num == 1) + data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; + else + return -EINVAL; + + /* Smart Panel, Sync mode */ + data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; + + /* Make sure clocks are on when connectors calling this function. 
*/ + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); + + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, + MDP5_SPLIT_DPL_LOWER_SMART_PANEL); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + mdp5_disable(mdp5_kms); + + return 0; +} + +/* initialize command mode encoder */ +struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf) +{ + struct drm_encoder *encoder = NULL; + struct mdp5_cmd_encoder *mdp5_cmd_enc; + int ret; + + if (WARN_ON((intf->type != INTF_DSI) && + (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) { + ret = -EINVAL; + goto fail; + } + + mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL); + if (!mdp5_cmd_enc) { + ret = -ENOMEM; + goto fail; + } + + memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); + encoder = &mdp5_cmd_enc->base; + + drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, + DRM_MODE_ENCODER_DSI); + + drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); + + bs_init(mdp5_cmd_enc); + + return encoder; + +fail: + if (encoder) + mdp5_cmd_encoder_destroy(encoder); + + return ERR_PTR(ret); +} + diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 2f2863c..c153077 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending) mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); } -#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm) - static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); @@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc) drm_atomic_crtc_for_each_plane(plane, crtc) { flush_mask |= mdp5_plane_get_flush(plane); } - flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); - flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm); + + flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm); crtc_flush(crtc, flush_mask); } @@ -298,8 +296,6 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc) mdp5_enable(mdp5_kms); mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); - crtc_flush_all(crtc); - mdp5_crtc->enabled = true; } @@ -444,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_device *dev = crtc->dev; struct mdp5_kms *mdp5_kms = get_kms(crtc); - struct drm_gem_object *cursor_bo, *old_bo; + struct drm_gem_object *cursor_bo, *old_bo = NULL; uint32_t blendcfg, cursor_addr, stride; int ret, bpp, lm; unsigned int depth; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); uint32_t roi_w, roi_h; + bool cursor_enable = true; unsigned long flags; if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { @@ -463,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); - return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false); + cursor_enable = false; + goto set_cursor; } cursor_bo = drm_gem_object_lookup(dev, file, handle); @@ -504,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); - ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); - if (ret) +set_cursor: + ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable); + if (ret) { + dev_err(dev->dev, "failed to %sable cursor: %d\n", + cursor_enable ? 
"en" : "dis", ret); goto end; + } - flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); crtc_flush(crtc, flush_mask); end: @@ -613,64 +614,39 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) } /* set interface for routing crtc->encoder: */ -void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, - enum mdp5_intf intf_id) +void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - uint32_t flush_mask = 0; - uint32_t intf_sel; - unsigned long flags; + int lm = mdp5_crtc_get_lm(crtc); /* now that we know what irq's we want: */ - mdp5_crtc->err.irqmask = intf2err(intf); - mdp5_crtc->vblank.irqmask = intf2vblank(intf); - mdp_irq_update(&mdp5_kms->base); - - spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); - - switch (intf) { - case 0: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id); - break; - case 1: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id); - break; - case 2: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id); - break; - case 3: - intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK; - intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id); - break; - default: - BUG(); - break; - } + mdp5_crtc->err.irqmask = intf2err(intf->num); - mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); - spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); + /* Register command mode Pingpong done as vblank for now, + * so that atomic commit should wait for it to finish. + * Ideally, in the future, we should take rd_ptr done as vblank, + * and let atomic commit wait for pingpong done for commond mode. + */ + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_crtc->vblank.irqmask = lm2ppdone(lm); + else + mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf); + mdp_irq_update(&mdp5_kms->base); - DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel); mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); - flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); - flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm); - - crtc_flush(crtc, flush_mask); } int mdp5_crtc_get_lm(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; +} - if (WARN_ON(!crtc)) - return -EINVAL; - - return mdp5_crtc->lm; +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl; } /* initialize crtc */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index 1511290..5488b68 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -33,23 +33,31 @@ * requested by the client (in mdp5_crtc_mode_set()). 
*/ +struct op_mode { + struct mdp5_interface intf; + + bool encoder_enabled; + uint32_t start_mask; +}; + struct mdp5_ctl { struct mdp5_ctl_manager *ctlm; u32 id; + int lm; /* whether this CTL has been allocated or not: */ bool busy; - /* memory output connection (@see mdp5_ctl_mode): */ - u32 mode; + /* Operation Mode Configuration for the Pipeline */ + struct op_mode pipeline; /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ spinlock_t hw_lock; u32 reg_offset; - /* flush mask used to commit CTL registers */ - u32 flush_mask; + /* when do CTL registers need to be flushed? (mask of trigger bits) */ + u32 pending_ctl_trigger; bool cursor_on; @@ -63,6 +71,9 @@ struct mdp5_ctl_manager { u32 nlm; u32 nctl; + /* to filter out non-present bits in the current hardware config */ + u32 flush_hw_mask; + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ spinlock_t pool_lock; struct mdp5_ctl ctls[MAX_CTL]; @@ -94,31 +105,172 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) return mdp5_read(mdp5_kms, reg); } +static void set_display_intf(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf) +{ + unsigned long flags; + u32 intf_sel; + + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); + intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0)); + + switch (intf->num) { + case 0: + intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; + intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type); + break; + case 1: + intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; + intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type); + break; + case 2: + intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; + intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type); + break; + case 3: + intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; + intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type); + break; + default: + BUG(); + break; + } + + mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); +} -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf) +static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) { unsigned long flags; - static const enum mdp5_intfnum intfnum[] = { - INTF0, INTF1, INTF2, INTF3, - }; + u32 ctl_op = 0; + + if (!mdp5_cfg_intf_is_virtual(intf->type)) + ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num); + + switch (intf->type) { + case INTF_DSI: + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + ctl_op |= MDP5_CTL_OP_CMD_MODE; + break; + + case INTF_WB: + if (intf->mode == MDP5_INTF_WB_MODE_LINE) + ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE); + break; + + default: + break; + } spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), - MDP5_CTL_OP_MODE(ctl->mode) | - MDP5_CTL_OP_INTF_NUM(intfnum[intf])); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + + memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); + + ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | + mdp_ctl_flush_mask_encoder(intf); + + /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ + if (!mdp5_cfg_intf_is_virtual(intf->type)) + set_display_intf(mdp5_kms, intf); + + set_ctl_op(ctl, intf); return 0; } -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable) +static bool start_signal_needed(struct mdp5_ctl *ctl) +{ + struct op_mode *pipeline = &ctl->pipeline; 
+ + if (!pipeline->encoder_enabled || pipeline->start_mask != 0) + return false; + + switch (pipeline->intf.type) { + case INTF_WB: + return true; + case INTF_DSI: + return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND; + default: + return false; + } +} + +/* + * send_start_signal() - Overlay Processor Start Signal + * + * For a given control operation (display pipeline), a START signal needs to be + * executed in order to kick off operation and activate all layers. + * e.g.: DSI command mode, Writeback + */ +static void send_start_signal(struct mdp5_ctl *ctl) +{ + unsigned long flags; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +static void refill_start_mask(struct mdp5_ctl *ctl) +{ + struct op_mode *pipeline = &ctl->pipeline; + struct mdp5_interface *intf = &ctl->pipeline.intf; + + pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm); + + /* + * Writeback encoder needs to program & flush + * address registers for each page flip.. + */ + if (intf->type == INTF_WB) + pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf); +} + +/** + * mdp5_ctl_set_encoder_state() - set the encoder state + * + * @enable: true, when encoder is ready for data streaming; false, otherwise. + * + * Note: + * This encoder state is needed to trigger START signal (data path kickoff). + */ +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled) +{ + if (WARN_ON(!ctl)) + return -EINVAL; + + ctl->pipeline.encoder_enabled = enabled; + DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off"); + + if (start_signal_needed(ctl)) { + send_start_signal(ctl); + refill_start_mask(ctl); + } + + return 0; +} + +/* + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) + */ +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; unsigned long flags; u32 blend_cfg; - int lm; + int lm = ctl->lm; - lm = mdp5_crtc_get_lm(ctl->crtc); if (unlikely(WARN_ON(lm < 0))) { dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", ctl->id, lm); @@ -138,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable) spin_unlock_irqrestore(&ctl->hw_lock, flags); + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); ctl->cursor_on = enable; return 0; } - int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) { unsigned long flags; @@ -157,37 +309,122 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg) ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); spin_unlock_irqrestore(&ctl->hw_lock, flags); + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm); + return 0; } +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf) +{ + if (intf->type == INTF_WB) + return MDP5_CTL_FLUSH_WB; + + switch (intf->num) { + case 0: return MDP5_CTL_FLUSH_TIMING_0; + case 1: return MDP5_CTL_FLUSH_TIMING_1; + case 2: return MDP5_CTL_FLUSH_TIMING_2; + case 3: return MDP5_CTL_FLUSH_TIMING_3; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_cursor(int cursor_id) +{ + switch (cursor_id) { + case 0: return MDP5_CTL_FLUSH_CURSOR_0; + case 1: return MDP5_CTL_FLUSH_CURSOR_1; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; + case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; + case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; + case SSPP_RGB0: 
return MDP5_CTL_FLUSH_RGB0; + case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; + case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; + case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; + case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; + case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; + case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_lm(int lm) +{ + switch (lm) { + case 0: return MDP5_CTL_FLUSH_LM0; + case 1: return MDP5_CTL_FLUSH_LM1; + case 2: return MDP5_CTL_FLUSH_LM2; + case 5: return MDP5_CTL_FLUSH_LM5; + default: return 0; + } +} + +static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + u32 sw_mask = 0; +#define BIT_NEEDS_SW_FIX(bit) \ + (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) + + /* for some targets, cursor bit is the same as LM bit */ + if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) + sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm); + + return sw_mask; +} + +/** + * mdp5_ctl_commit() - Register Flush + * + * The flush register is used to indicate several registers are all + * programmed, and are safe to update to the back copy of the double + * buffered registers. + * + * Some registers FLUSH bits are shared when the hardware does not have + * dedicated bits for them; handling these is the job of fix_sw_flush(). + * + * CTL registers need to be flushed in some circumstances; if that is the + * case, some trigger bits will be present in both flush mask and + * ctl->pending_ctl_trigger. + */ int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + struct op_mode *pipeline = &ctl->pipeline; unsigned long flags; - if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) { - int lm = mdp5_crtc_get_lm(ctl->crtc); + pipeline->start_mask &= ~flush_mask; - if (unlikely(WARN_ON(lm < 0))) { - dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", - ctl->id, lm); - return -EINVAL; - } + VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, + pipeline->start_mask, ctl->pending_ctl_trigger); - /* for current targets, cursor bit is the same as LM bit */ - flush_mask |= mdp_ctl_flush_mask_lm(lm); + if (ctl->pending_ctl_trigger & flush_mask) { + flush_mask |= MDP5_CTL_FLUSH_CTL; + ctl->pending_ctl_trigger = 0; } - spin_lock_irqsave(&ctl->hw_lock, flags); - ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); - spin_unlock_irqrestore(&ctl->hw_lock, flags); + flush_mask |= fix_sw_flush(ctl, flush_mask); - return 0; -} + flush_mask &= ctl_mgr->flush_hw_mask; -u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl) -{ - return ctl->flush_mask; + if (flush_mask) { + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } + + if (start_signal_needed(ctl)) { + send_start_signal(ctl); + refill_start_mask(ctl); + } + + return 0; } void mdp5_ctl_release(struct mdp5_ctl *ctl) @@ -208,6 +445,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl) DBG("CTL %d released", ctl->id); } +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) +{ + return WARN_ON(!ctl) ? 
-EINVAL : ctl->id; +} + /* * mdp5_ctl_request() - CTL dynamic allocation * @@ -235,8 +477,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, ctl = &ctl_mgr->ctls[c]; + ctl->lm = mdp5_crtc_get_lm(crtc); ctl->crtc = crtc; ctl->busy = true; + ctl->pending_ctl_trigger = 0; DBG("CTL %d allocated", ctl->id); unlock: @@ -267,7 +511,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg) { struct mdp5_ctl_manager *ctl_mgr; - const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl; + const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -289,6 +533,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, ctl_mgr->dev = dev; ctl_mgr->nlm = hw_cfg->lm.count; ctl_mgr->nctl = ctl_cfg->count; + ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask; spin_lock_init(&ctl_mgr->pool_lock); /* initialize each CTL of the pool: */ @@ -303,9 +548,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, } ctl->ctlm = ctl_mgr; ctl->id = c; - ctl->mode = MODE_NONE; ctl->reg_offset = ctl_cfg->base[c]; - ctl->flush_mask = MDP5_CTL_FLUSH_CTL; ctl->busy = false; spin_lock_init(&ctl->hw_lock); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index ad48788..7a62000 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -33,19 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. */ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf); +struct mdp5_interface; +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf); +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable); - -/* @blend_cfg: see LM blender config definition below */ -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); - -/* @flush_mask: see CTL flush masks definitions below */ -int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); -u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl); - -void mdp5_ctl_release(struct mdp5_ctl *ctl); +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); /* * blend_cfg (LM blender config): @@ -72,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, } /* - * flush_mask (CTL flush masks): + * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) + * + * @blend_cfg: see LM blender config definition below * - * The following functions allow each DRM entity to get and store - * their own flush mask. - * Once stored, these masks will then be accessed through each DRM's - * interface and used by the caller of mdp5_ctl_commit() to specify - * which block(s) need to be flushed through @flush_mask parameter. + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ +int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg); -#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000 +/** + * mdp_ctl_flush_mask...() - Register FLUSH masks + * + * These masks are used to specify which block(s) need to be flushed + * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask). 
+ */ +u32 mdp_ctl_flush_mask_lm(int lm); +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe); +u32 mdp_ctl_flush_mask_cursor(int cursor_id); +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); -static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id) -{ - /* TODO: use id once multiple cursor support is present */ - (void)cursor_id; +/* @flush_mask: see CTL flush masks definitions below */ +int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); - return MDP5_CTL_FLUSH_CURSOR_DUMMY; -} +void mdp5_ctl_release(struct mdp5_ctl *ctl); -static inline u32 mdp_ctl_flush_mask_lm(int lm) -{ - switch (lm) { - case 0: return MDP5_CTL_FLUSH_LM0; - case 1: return MDP5_CTL_FLUSH_LM1; - case 2: return MDP5_CTL_FLUSH_LM2; - case 5: return MDP5_CTL_FLUSH_LM5; - default: return 0; - } -} -static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) -{ - switch (pipe) { - case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; - case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; - case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; - case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; - case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; - case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; - case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; - case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; - case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; - case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; - default: return 0; - } -} #endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index af0e02f..1188f4b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -23,8 +23,7 @@ struct mdp5_encoder { struct drm_encoder base; - int intf; - enum mdp5_intf intf_id; + struct mdp5_interface intf; spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ bool enabled; uint32_t bsc; @@ -126,7 +125,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, struct mdp5_kms *mdp5_kms = get_kms(encoder); struct drm_device *dev = encoder->dev; struct drm_connector *connector; - int intf = mdp5_encoder->intf; + int intf = mdp5_encoder->intf.num; uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; uint32_t display_v_start, display_v_end; uint32_t hsync_start_x, hsync_end_x; @@ -188,7 +187,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, * DISPLAY_V_START = (VBP * HCYCLE) + HBP * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP */ - if (mdp5_encoder->intf_id == INTF_eDP) { + if (mdp5_encoder->intf.type == INTF_eDP) { display_v_start += mode->htotal - mode->hsync_start; display_v_end -= mode->hsync_start - mode->hdisplay; } @@ -218,21 +217,29 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? 
*/ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf); } static void mdp5_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf; + struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + int lm = mdp5_crtc_get_lm(encoder->crtc); + struct mdp5_interface *intf = &mdp5_encoder->intf; + int intfn = mdp5_encoder->intf.num; unsigned long flags; if (WARN_ON(!mdp5_encoder->enabled)) return; + mdp5_ctl_set_encoder_state(ctl, false); + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -242,7 +249,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder) * the settings changes for the new modeset (like new * scanout buffer) don't latch properly.. */ - mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); + mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf)); bs_set(mdp5_encoder, 0); @@ -253,19 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf; + struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc); + struct mdp5_interface *intf = &mdp5_encoder->intf; + int intfn = mdp5_encoder->intf.num; unsigned long flags; if (WARN_ON(mdp5_encoder->enabled)) return; - mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf, - mdp5_encoder->intf_id); - bs_set(mdp5_encoder, 1); spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + + mdp5_ctl_set_encoder_state(ctl, true); mdp5_encoder->enabled = true; } @@ -277,12 +286,51 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { .enable = mdp5_encoder_enable, }; +int mdp5_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_encoder->intf.num; + + /* Switch slave encoder's TimingGen Sync mode, + * to use the master's enable signal for the slave encoder. + */ + if (intf_num == 1) + data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; + else + return -EINVAL; + + /* Make sure clocks are on when connectors calling this function. 
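The two intf_num checks above encode what appears to be the only supported split-display pairing (an assumption read from this function alone: INTF1 and INTF2 gang together, each able to slave the other's timing generator). A sketch of the same selection as a standalone helper, with an invented name:

    /* Illustrative only: pick the lower-pipe TG-sync bit for a master. */
    static u32 split_dpl_lower_sync_bit(int master_intf_num)
    {
            switch (master_intf_num) {
            case 1: return MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; /* INTF2 slaves */
            case 2: return MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; /* INTF1 slaves */
            default: return 0; /* pairing unsupported */
            }
    }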
*/ + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), + MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); + /* Dumb Panel, Sync mode */ + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + mdp5_disable(mdp5_kms); + + return 0; +} + /* initialize encoder */ -struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, - enum mdp5_intf intf_id) +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf) { struct drm_encoder *encoder = NULL; struct mdp5_encoder *mdp5_encoder; + int enc_type = (intf->type == INTF_DSI) ? + DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; int ret; mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); @@ -291,14 +339,13 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, goto fail; } - mdp5_encoder->intf = intf; - mdp5_encoder->intf_id = intf_id; + memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf)); encoder = &mdp5_encoder->base; spin_lock_init(&mdp5_encoder->intf_lock); - drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type); + drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); bs_init(mdp5_encoder); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index a940710..33bd4c6 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -23,7 +23,7 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask) { - mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); } static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) @@ -35,8 +35,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); - mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff); + mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); mdp5_disable(mdp5_kms); } @@ -61,7 +61,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); mdp5_disable(mdp5_kms); } @@ -73,8 +73,8 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) unsigned int id; uint32_t status; - status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS); - mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); + status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)); + mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); VERB("status=%08x", status); @@ -91,13 +91,13 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); uint32_t intr; - intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS); + intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS); VERB("intr=%08x", intr); - if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) { + if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) { mdp5_irq_mdp(mdp_kms); - intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP; + intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP; } while (intr) { @@ -128,10 +128,10 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) * can register to get their irq's delivered */ -#define VALID_IRQS 
(MDP5_HW_INTR_STATUS_INTR_DSI0 | \ - MDP5_HW_INTR_STATUS_INTR_DSI1 | \ - MDP5_HW_INTR_STATUS_INTR_HDMI | \ - MDP5_HW_INTR_STATUS_INTR_EDP) +#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \ + MDSS_HW_INTR_STATUS_INTR_DSI1 | \ + MDSS_HW_INTR_STATUS_INTR_HDMI | \ + MDSS_HW_INTR_STATUS_INTR_EDP) static void mdp5_hw_mask_irq(struct irq_data *irqd) { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 92b61db..dfa8beb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -58,7 +58,7 @@ static int mdp5_hw_init(struct msm_kms *kms) */ spin_lock_irqsave(&mdp5_kms->resource_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); + mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0); spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); mdp5_ctlm_hw_reset(mdp5_kms->ctlm); @@ -86,6 +86,18 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, return rate; } +static int mdp5_set_split_display(struct msm_kms *kms, + struct drm_encoder *encoder, + struct drm_encoder *slave_encoder, + bool is_cmd_mode) +{ + if (is_cmd_mode) + return mdp5_cmd_encoder_set_split_display(encoder, + slave_encoder); + else + return mdp5_encoder_set_split_display(encoder, slave_encoder); +} + static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -131,6 +143,7 @@ static const struct mdp_kms_funcs kms_funcs = { .complete_commit = mdp5_complete_commit, .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, + .set_split_display = mdp5_set_split_display, .preclose = mdp5_preclose, .destroy = mdp5_destroy, }, @@ -161,6 +174,134 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) return 0; } +static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, + enum mdp5_intf_type intf_type, int intf_num, + enum mdp5_intf_mode intf_mode) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_encoder *encoder; + struct mdp5_interface intf = { + .num = intf_num, + .type = intf_type, + .mode = intf_mode, + }; + + if ((intf_type == INTF_DSI) && + (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) + encoder = mdp5_cmd_encoder_init(dev, &intf); + else + encoder = mdp5_encoder_init(dev, &intf); + + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct encoder\n"); + return encoder; + } + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; + priv->encoders[priv->num_encoders++] = encoder; + + return encoder; +} + +static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) +{ + const int intf_cnt = hw_cfg->intf.count; + const u32 *intfs = hw_cfg->intfs; + int id = 0, i; + + for (i = 0; i < intf_cnt; i++) { + if (intfs[i] == INTF_DSI) { + if (intf_num == i) + return id; + + id++; + } + } + + return -EINVAL; +} + +static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + const struct mdp5_cfg_hw *hw_cfg = + mdp5_cfg_get_hw_config(mdp5_kms->cfg); + enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num]; + struct drm_encoder *encoder; + int ret = 0; + + switch (intf_type) { + case INTF_DISABLED: + break; + case INTF_eDP: + if (!priv->edp) + break; + + encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, + MDP5_INTF_MODE_NONE); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_edp_modeset_init(priv->edp, dev, 
encoder); + break; + case INTF_HDMI: + if (!priv->hdmi) + break; + + encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, + MDP5_INTF_MODE_NONE); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = hdmi_modeset_init(priv->hdmi, dev, encoder); + break; + case INTF_DSI: + { + int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); + struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; + enum mdp5_intf_mode mode; + int i; + + if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { + dev_err(dev->dev, "failed to find dsi from intf %d\n", + intf_num); + ret = -EINVAL; + break; + } + + if (!priv->dsi[dsi_id]) + break; + + for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { + mode = (i == MSM_DSI_CMD_ENCODER_ID) ? + MDP5_INTF_DSI_MODE_COMMAND : + MDP5_INTF_DSI_MODE_VIDEO; + dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, + intf_num, mode); + if (IS_ERR(dsi_encs)) { + ret = PTR_ERR(dsi_encs); + break; + } + } + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); + break; + } + default: + dev_err(dev->dev, "unknown intf: %d\n", intf_type); + ret = -EINVAL; + break; + } + + return ret; +} + static int modeset_init(struct mdp5_kms *mdp5_kms) { static const enum mdp5_pipe crtcs[] = { @@ -171,7 +312,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; - struct drm_encoder *encoder; const struct mdp5_cfg_hw *hw_cfg; int i, ret; @@ -222,44 +362,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } } - if (priv->hdmi) { - /* Construct encoder for HDMI: */ - encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct encoder\n"); - ret = PTR_ERR(encoder); - goto fail; - } - - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;; - priv->encoders[priv->num_encoders++] = encoder; - - ret = hdmi_modeset_init(priv->hdmi, dev, encoder); - if (ret) { - dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); - goto fail; - } - } - - if (priv->edp) { - /* Construct encoder for eDP: */ - encoder = mdp5_encoder_init(dev, 0, INTF_eDP); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct eDP encoder\n"); - ret = PTR_ERR(encoder); - goto fail; - } - - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; - priv->encoders[priv->num_encoders++] = encoder; - - /* Construct bridge/connector for eDP: */ - ret = msm_edp_modeset_init(priv->edp, dev, encoder); - if (ret) { - dev_err(dev->dev, "failed to initialize eDP: %d\n", - ret); + /* Construct encoders and modeset initialize connector devices + * for each external display interface. 
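get_dsi_id_from_intf() above derives a DSI controller index by counting only the INTF_DSI entries in the interface table. A worked example with a hypothetical configuration:

    /* Suppose hw_cfg->intfs[] = { INTF_DISABLED, INTF_DSI,
     *                             INTF_HDMI,     INTF_DSI };
     * then:
     *   get_dsi_id_from_intf(hw_cfg, 1) == 0        first DSI
     *   get_dsi_id_from_intf(hw_cfg, 3) == 1        second DSI
     *   get_dsi_id_from_intf(hw_cfg, 2) == -EINVAL  (HDMI, not DSI)
     */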
+ */ + for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) { + ret = modeset_init_intf(mdp5_kms, i); + if (ret) goto fail; - } } return 0; @@ -274,11 +383,11 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms, uint32_t version; mdp5_enable(mdp5_kms); - version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION); + version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION); mdp5_disable(mdp5_kms); - *major = FIELD(version, MDP5_MDP_VERSION_MAJOR); - *minor = FIELD(version, MDP5_MDP_VERSION_MINOR); + *major = FIELD(version, MDSS_HW_VERSION_MAJOR); + *minor = FIELD(version, MDSS_HW_VERSION_MINOR); DBG("MDP5 version v%d.%d", *major, *minor); } @@ -321,6 +430,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdp5_kms->dev = dev; + /* mdp5_kms->mmio actually represents the MDSS base address */ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); if (IS_ERR(mdp5_kms->mmio)) { ret = PTR_ERR(mdp5_kms->mmio); @@ -403,8 +513,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) * we don't disable): */ mdp5_enable(mdp5_kms); - for (i = 0; i < config->hw->intf.count; i++) + for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { + if (!config->hw->intf.base[i] || + mdp5_cfg_intf_is_virtual(config->hw->intfs[i])) + continue; mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); + } mdp5_disable(mdp5_kms); mdelay(16); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 49d011e..2c0de17 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -54,7 +54,7 @@ struct mdp5_kms { /* * lock to protect access to global resources: ie., following register: - * - REG_MDP5_DISP_INTF_SEL + * - REG_MDP5_MDP_DISP_INTF_SEL */ spinlock_t resource_lock; @@ -94,6 +94,24 @@ struct mdp5_plane_state { #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) +enum mdp5_intf_mode { + MDP5_INTF_MODE_NONE = 0, + + /* Modes used for DSI interface (INTF_DSI type): */ + MDP5_INTF_DSI_MODE_VIDEO, + MDP5_INTF_DSI_MODE_COMMAND, + + /* Modes used for WB interface (INTF_WB type): */ + MDP5_INTF_WB_MODE_BLOCK, + MDP5_INTF_WB_MODE_LINE, +}; + +struct mdp5_interface { + int num; /* display interface number */ + enum mdp5_intf_type type; + enum mdp5_intf_mode mode; +}; + static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { msm_writel(data, mdp5_kms->mmio + reg); @@ -130,9 +148,9 @@ static inline int pipe2nclients(enum mdp5_pipe pipe) } } -static inline uint32_t intf2err(int intf) +static inline uint32_t intf2err(int intf_num) { - switch (intf) { + switch (intf_num) { case 0: return MDP5_IRQ_INTF0_UNDER_RUN; case 1: return MDP5_IRQ_INTF1_UNDER_RUN; case 2: return MDP5_IRQ_INTF2_UNDER_RUN; @@ -141,9 +159,23 @@ static inline uint32_t intf2err(int intf) } } -static inline uint32_t intf2vblank(int intf) +#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer) +static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) { - switch (intf) { + /* + * In case of DSI Command Mode, the Ping Pong's read pointer IRQ + * acts as a Vblank signal. The Ping Pong buffer used is bound to + * layer mixer. 
+ */ + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) + return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm); + + if (intf->type == INTF_WB) + return MDP5_IRQ_WB_2_DONE; + + switch (intf->num) { case 0: return MDP5_IRQ_INTF0_VSYNC; case 1: return MDP5_IRQ_INTF1_VSYNC; case 2: return MDP5_IRQ_INTF2_VSYNC; @@ -152,6 +184,11 @@ static inline uint32_t intf2vblank(int intf) } } +static inline uint32_t lm2ppdone(int lm) +{ + return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm); +} + int mdp5_disable(struct mdp5_kms *mdp5_kms); int mdp5_enable(struct mdp5_kms *mdp5_kms); @@ -197,13 +234,33 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); -void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, - enum mdp5_intf intf_id); +void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id); -struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf, - enum mdp5_intf intf_id); +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf); +int mdp5_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); + +#ifdef CONFIG_DRM_MSM_DSI +struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf); +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +#else +static inline struct drm_encoder *mdp5_cmd_encoder_init( + struct drm_device *dev, struct mdp5_interface *intf) +{ + return ERR_PTR(-EINVAL); +} +static inline int mdp5_cmd_encoder_set_split_display( + struct drm_encoder *encoder, struct drm_encoder *slave_encoder) +{ + return -EINVAL; +} +#endif #endif /* __MDP5_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 6bd48e2..18a3d20 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -507,8 +507,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), - MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | - MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 1f795af..16702ae 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -43,7 +43,7 @@ * set. 
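The layer-mixer to PingPong mapping above is identity except for LM 5, which is tied to PingPong 3. Assuming the per-PingPong IRQ bits are laid out consecutively (which the shift in these helpers relies on), the masks work out as:

    /* GET_PING_PONG_ID(0) == 0, GET_PING_PONG_ID(5) == 3, hence:
     *   lm2ppdone(0) == MDP5_IRQ_PING_PONG_0_DONE << 0     PP0 done
     *   lm2ppdone(5) == MDP5_IRQ_PING_PONG_0_DONE << 3     PP3 done
     * and a command-mode DSI CRTC on LM 5 uses
     *   MDP5_IRQ_PING_PONG_0_RD_PTR << 3
     * as its vblank-equivalent read-pointer interrupt. */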
* * 2) mdp5_smp_configure(): - * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers + * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * are configured for the union(pending, inuse) * * 3) mdp5_smp_commit(): @@ -74,7 +74,7 @@ struct mdp5_smp { spinlock_t state_lock; mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */ - struct mdp5_client_smp_state client_state[CID_MAX]; + struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; static inline @@ -85,27 +85,31 @@ struct mdp5_kms *get_kms(struct mdp5_smp *smp) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane) +static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) { - WARN_ON(plane >= pipe2nclients(pipe)); - switch (pipe) { - case SSPP_VIG0: return CID_VIG0_Y + plane; - case SSPP_VIG1: return CID_VIG1_Y + plane; - case SSPP_VIG2: return CID_VIG2_Y + plane; - case SSPP_RGB0: return CID_RGB0; - case SSPP_RGB1: return CID_RGB1; - case SSPP_RGB2: return CID_RGB2; - case SSPP_DMA0: return CID_DMA0_Y + plane; - case SSPP_DMA1: return CID_DMA1_Y + plane; - case SSPP_VIG3: return CID_VIG3_Y + plane; - case SSPP_RGB3: return CID_RGB3; - default: return CID_UNUSED; - } +#define CID_UNUSED 0 + + if (WARN_ON(plane >= pipe2nclients(pipe))) + return CID_UNUSED; + + /* + * Note on SMP clients: + * For ViG pipes, fetch Y/Cr/Cb-components clients are always + * consecutive, and in that order. + * + * e.g.: + * if mdp5_cfg->smp.clients[SSPP_VIG0] = N, + * Y plane's client ID is N + * Cr plane's client ID is N + 1 + * Cb plane's client ID is N + 2 + */ + + return mdp5_cfg->smp.clients[pipe] + plane; } /* step #1: update # of blocks pending for the client: */ static int smp_request_block(struct mdp5_smp *smp, - enum mdp5_client_id cid, int nblks) + u32 cid, int nblks) { struct mdp5_kms *mdp5_kms = get_kms(smp); const struct mdp5_cfg_hw *hw_cfg; @@ -227,7 +231,7 @@ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) } static void update_smp_state(struct mdp5_smp *smp, - enum mdp5_client_id cid, mdp5_smp_state_t *assigned) + u32 cid, mdp5_smp_state_t *assigned) { struct mdp5_kms *mdp5_kms = get_kms(smp); int cnt = smp->blk_cnt; @@ -237,25 +241,25 @@ static void update_smp_state(struct mdp5_smp *smp, int idx = blk / 3; int fld = blk % 3; - val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx)); + val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx)); switch (fld) { case 0: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); + val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; + val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid); break; case 1: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); + val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; + val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid); break; case 2: - val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; - val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); + val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; + val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid); break; } - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val); + mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val); } } @@ -267,7 +271,7 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) int i; for (i = 0; i < pipe2nclients(pipe); i++) { - enum mdp5_client_id cid = pipe2client(pipe, i); + u32 cid = 
pipe2client(pipe, i); struct mdp5_client_smp_state *ps = &smp->client_state[cid]; bitmap_or(assigned, ps->inuse, ps->pending, cnt); @@ -283,7 +287,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) int i; for (i = 0; i < pipe2nclients(pipe); i++) { - enum mdp5_client_id cid = pipe2client(pipe, i); + u32 cid = pipe2client(pipe, i); struct mdp5_client_smp_state *ps = &smp->client_state[cid]; /* diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index a426911..47f4dd4 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -182,41 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev) return 4; } -static int msm_load(struct drm_device *dev, unsigned long flags) -{ - struct platform_device *pdev = dev->platformdev; - struct msm_drm_private *priv; - struct msm_kms *kms; - int ret; - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - dev_err(dev->dev, "failed to allocate private data\n"); - return -ENOMEM; - } +#include <linux/of_address.h> - dev->dev_private = priv; - - priv->wq = alloc_ordered_workqueue("msm", 0); - init_waitqueue_head(&priv->fence_event); - init_waitqueue_head(&priv->pending_crtcs_event); - - INIT_LIST_HEAD(&priv->inactive_list); - INIT_LIST_HEAD(&priv->fence_cbs); +static int msm_init_vram(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + unsigned long size = 0; + int ret = 0; - drm_mode_config_init(dev); +#ifdef CONFIG_OF + /* In the device-tree world, we could have a 'memory-region' + * phandle, which gives us a link to our "vram". Allocating + * is all nicely abstracted behind the dma api, but we need + * to know the entire size to allocate it all in one go. There + * are two cases: + * 1) device with no IOMMU, in which case we need exclusive + * access to a VRAM carveout big enough for all gpu + * buffers + * 2) device with IOMMU, but where the bootloader puts up + * a splash screen. In this case, the VRAM carveout + * need only be large enough for fbdev fb. But we need + * exclusive access to the buffer to avoid the kernel + * using those pages for other purposes (which appears + * as corruption on screen before we have a chance to + * load and do initial modeset) + */ + struct device_node *node; + + node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); + if (node) { + struct resource r; + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + size = r.end - r.start; + DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start); + } else +#endif /* if we have no IOMMU, then we need to use carveout allocator. 
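The consecutive-client rule documented in pipe2client() above can be made concrete with a hypothetical client map (the real values come from mdp5_cfg->smp.clients):

    /* Suppose mdp5_cfg->smp.clients[SSPP_VIG0] == 7. Then:
     *   pipe2client(SSPP_VIG0, 0) == 7   Y  fetch client
     *   pipe2client(SSPP_VIG0, 1) == 8   Cr fetch client
     *   pipe2client(SSPP_VIG0, 2) == 9   Cb fetch client
     * RGB pipes have a single client, so only plane 0 is valid there;
     * larger plane indices trip the WARN_ON() and yield CID_UNUSED. */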
* Grab the entire CMA chunk carved out in early startup in * mach-msm: */ if (!iommu_present(&platform_bus_type)) { + DRM_INFO("using %s VRAM carveout\n", vram); + size = memparse(vram, NULL); + } + + if (size) { DEFINE_DMA_ATTRS(attrs); - unsigned long size; void *p; - DBG("using %s VRAM carveout", vram); - size = memparse(vram, NULL); priv->vram.size = size; drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); @@ -232,8 +248,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) if (!p) { dev_err(dev->dev, "failed to allocate VRAM\n"); priv->vram.paddr = 0; - ret = -ENOMEM; - goto fail; + return -ENOMEM; } dev_info(dev->dev, "VRAM: %08x->%08x\n", @@ -241,6 +256,37 @@ static int msm_load(struct drm_device *dev, unsigned long flags) (uint32_t)(priv->vram.paddr + size)); } + return ret; +} + +static int msm_load(struct drm_device *dev, unsigned long flags) +{ + struct platform_device *pdev = dev->platformdev; + struct msm_drm_private *priv; + struct msm_kms *kms; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev->dev, "failed to allocate private data\n"); + return -ENOMEM; + } + + dev->dev_private = priv; + + priv->wq = alloc_ordered_workqueue("msm", 0); + init_waitqueue_head(&priv->fence_event); + init_waitqueue_head(&priv->pending_crtcs_event); + + INIT_LIST_HEAD(&priv->inactive_list); + INIT_LIST_HEAD(&priv->fence_cbs); + + drm_mode_config_init(dev); + + ret = msm_init_vram(dev); + if (ret) + goto fail; + platform_set_drvdata(pdev, dev); /* Bind all our sub-components: */ @@ -1030,6 +1076,7 @@ static struct platform_driver msm_platform_driver = { static int __init msm_drm_register(void) { DBG("init"); + msm_dsi_register(); msm_edp_register(); hdmi_register(); adreno_register(); @@ -1043,6 +1090,7 @@ static void __exit msm_drm_unregister(void) hdmi_unregister(); adreno_unregister(); msm_edp_unregister(); + msm_dsi_unregister(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9e8d441..04db4bd 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -82,6 +82,9 @@ struct msm_drm_private { */ struct msm_edp *edp; + /* DSI is shared by mdp4 and mdp5 */ + struct msm_dsi *dsi[2]; + /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; struct msm_file_private *lastctx; @@ -236,6 +239,32 @@ void __exit msm_edp_unregister(void); int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct drm_encoder *encoder); +struct msm_dsi; +enum msm_dsi_encoder_id { + MSM_DSI_VIDEO_ENCODER_ID = 0, + MSM_DSI_CMD_ENCODER_ID = 1, + MSM_DSI_ENCODER_NUM = 2 +}; +#ifdef CONFIG_DRM_MSM_DSI +void __init msm_dsi_register(void); +void __exit msm_dsi_unregister(void); +int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, + struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]); +#else +static inline void __init msm_dsi_register(void) +{ +} +static inline void __exit msm_dsi_unregister(void) +{ +} +static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, + struct drm_device *dev, + struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) +{ + return -EINVAL; +} +#endif + #ifdef CONFIG_DEBUG_FS void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index df60f65..95f6532 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ 
b/drivers/gpu/drm/msm/msm_fbdev.c @@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; DBG("allocating %d bytes for fb %d", size, dev->primary->index); mutex_lock(&dev->struct_mutex); - fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC); + fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | + MSM_BO_WC | MSM_BO_STOLEN); mutex_unlock(&dev->struct_mutex); if (IS_ERR(fbdev->bo)) { ret = PTR_ERR(fbdev->bo); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 49dea4f..479d8af 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj) priv->vram.paddr; } +static bool use_pages(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + return !msm_obj->vram_node; +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) @@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj) struct page **p; int npages = obj->size >> PAGE_SHIFT; - if (iommu_present(&platform_bus_type)) + if (use_pages(obj)) p = drm_gem_get_pages(obj); else p = get_pages_vram(obj, npages); @@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj) sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); - if (iommu_present(&platform_bus_type)) + if (use_pages(obj)) drm_gem_put_pages(obj, msm_obj->pages, true, false); else { drm_mm_remove_node(msm_obj->vram_node); @@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev, struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; unsigned sz; + bool use_vram = false; switch (flags & MSM_BO_CACHE_MASK) { case MSM_BO_UNCACHED: @@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev, return -EINVAL; } - sz = sizeof(*msm_obj); if (!iommu_present(&platform_bus_type)) + use_vram = true; + else if ((flags & MSM_BO_STOLEN) && priv->vram.size) + use_vram = true; + + if (WARN_ON(use_vram && !priv->vram.size)) + return -EINVAL; + + sz = sizeof(*msm_obj); + if (use_vram) sz += sizeof(struct drm_mm_node); msm_obj = kzalloc(sz, GFP_KERNEL); if (!msm_obj) return -ENOMEM; - if (!iommu_present(&platform_bus_type)) + if (use_vram) msm_obj->vram_node = (void *)&msm_obj[1]; msm_obj->flags = flags; @@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, if (ret) goto fail; - if (iommu_present(&platform_bus_type)) { + if (use_pages(obj)) { ret = drm_gem_object_init(dev, obj, size); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8fbbd05..85d481e 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -21,6 +21,9 @@ #include <linux/reservation.h> #include "msm_drv.h" +/* Additional internal-use only BO flags: */ +#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ + struct msm_gem_object { struct drm_gem_object base; @@ -59,7 +62,7 @@ struct msm_gem_object { struct reservation_object _resv; /* For physically contiguous buffers. Used when we don't have - * an IOMMU. + * an IOMMU. Also used for stolen/splashscreen buffer. 
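The buffer-placement decision in msm_gem_new_impl() above reduces to two cases. A compact restatement as a sketch, with an invented helper name:

    /* Sketch only: mirrors the use_vram selection in msm_gem_new_impl(). */
    static bool should_use_vram(bool have_iommu, uint32_t flags,
                                size_t vram_size)
    {
            if (!have_iommu)
                    return true;   /* no IOMMU: all BOs from the carveout */
            if ((flags & MSM_BO_STOLEN) && vram_size)
                    return true;   /* scanout BO from the splash carveout */
            return false;          /* regular shmem-backed pages */
    }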
*/ struct drm_mm_node *vram_node; }; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 3a78cb4..a9f17bd 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -47,6 +47,10 @@ struct msm_kms_funcs { const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); + int (*set_split_display)(struct msm_kms *kms, + struct drm_encoder *encoder, + struct drm_encoder *slave_encoder, + bool is_cmd_mode); /* cleanup: */ void (*preclose)(struct msm_kms *kms, struct drm_file *file); void (*destroy)(struct msm_kms *kms); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index a94b11f7..b41965c 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -271,18 +271,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .best_encoder = omap_connector_attached_encoder, }; -/* flush an area of the framebuffer (in case of manual update display that - * is not automatically flushed) - */ -void omap_connector_flush(struct drm_connector *connector, - int x, int y, int w, int h) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - - /* TODO: enable when supported in dss */ - VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h); -} - /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, int connector_type, struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index b0566a1..f456544 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -28,7 +28,6 @@ struct omap_crtc { struct drm_crtc base; - struct drm_plane *plane; const char *name; int pipe; @@ -46,7 +45,6 @@ struct omap_crtc { struct omap_video_timings timings; bool enabled; - bool full_update; struct omap_drm_apply apply; @@ -74,8 +72,14 @@ struct omap_crtc { * XXX maybe fold into apply_work?? */ struct work_struct page_flip_work; + + bool ignore_digit_sync_lost; }; +/* ----------------------------------------------------------------------------- + * Helper Functions + */ + uint32_t pipe2vbl(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -83,6 +87,22 @@ uint32_t pipe2vbl(struct drm_crtc *crtc) return dispc_mgr_get_vsync_irq(omap_crtc->channel); } +const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc) +{ + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + return &omap_crtc->timings; +} + +enum omap_channel omap_crtc_channel(struct drm_crtc *crtc) +{ + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + return omap_crtc->channel; +} + +/* ----------------------------------------------------------------------------- + * DSS Manager Functions + */ + /* * Manager-ops, callbacks from output when they need to configure * the upstream part of the video pipe. @@ -122,7 +142,63 @@ static void omap_crtc_start_update(struct omap_overlay_manager *mgr) { } -static void set_enabled(struct drm_crtc *crtc, bool enable); +/* Called only from CRTC pre_apply and suspend/resume handlers. 
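The enable/disable helper that follows waits on different interrupts depending on direction; its policy, summarized directly from the code below:

    /* omap_crtc_set_enabled() wait policy:
     *   enable:  wait for 1 VSYNC, confirming the output is running;
     *   disable: wait for FRAMEDONE so DISPC is done with the output,
     *            or, on digit outputs without a FRAMEDONE IRQ
     *            (OMAP2/3), for 2 VSYNCs to cover even and odd fields.
     * Both waits are bounded by a 100 ms timeout. */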
*/ +static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) +{ + struct drm_device *dev = crtc->dev; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + enum omap_channel channel = omap_crtc->channel; + struct omap_irq_wait *wait; + u32 framedone_irq, vsync_irq; + int ret; + + if (dispc_mgr_is_enabled(channel) == enable) + return; + + if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) { + /* + * Digit output produces some sync lost interrupts during the + * first frame when enabling, so we need to ignore those. + */ + omap_crtc->ignore_digit_sync_lost = true; + } + + framedone_irq = dispc_mgr_get_framedone_irq(channel); + vsync_irq = dispc_mgr_get_vsync_irq(channel); + + if (enable) { + wait = omap_irq_wait_init(dev, vsync_irq, 1); + } else { + /* + * When we disable the digit output, we need to wait for + * FRAMEDONE to know that DISPC has finished with the output. + * + * OMAP2/3 does not have FRAMEDONE irq for digit output, and in + * that case we need to use vsync interrupt, and wait for both + * even and odd frames. + */ + + if (framedone_irq) + wait = omap_irq_wait_init(dev, framedone_irq, 1); + else + wait = omap_irq_wait_init(dev, vsync_irq, 2); + } + + dispc_mgr_enable(channel, enable); + + ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); + if (ret) { + dev_err(dev->dev, "%s: timeout waiting for %s\n", + omap_crtc->name, enable ? "enable" : "disable"); + } + + if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) { + omap_crtc->ignore_digit_sync_lost = false; + /* make sure the irq handler sees the value above */ + mb(); + } +} + static int omap_crtc_enable(struct omap_overlay_manager *mgr) { @@ -131,7 +207,7 @@ static int omap_crtc_enable(struct omap_overlay_manager *mgr) dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info); dispc_mgr_set_timings(omap_crtc->channel, &omap_crtc->timings); - set_enabled(&omap_crtc->base, true); + omap_crtc_set_enabled(&omap_crtc->base, true); return 0; } @@ -140,7 +216,7 @@ static void omap_crtc_disable(struct omap_overlay_manager *mgr) { struct omap_crtc *omap_crtc = omap_crtcs[mgr->id]; - set_enabled(&omap_crtc->base, false); + omap_crtc_set_enabled(&omap_crtc->base, false); } static void omap_crtc_set_timings(struct omap_overlay_manager *mgr, @@ -149,7 +225,6 @@ static void omap_crtc_set_timings(struct omap_overlay_manager *mgr, struct omap_crtc *omap_crtc = omap_crtcs[mgr->id]; DBG("%s", omap_crtc->name); omap_crtc->timings = *timings; - omap_crtc->full_update = true; } static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr, @@ -174,19 +249,201 @@ static void omap_crtc_unregister_framedone_handler( } static const struct dss_mgr_ops mgr_ops = { - .connect = omap_crtc_connect, - .disconnect = omap_crtc_disconnect, - .start_update = omap_crtc_start_update, - .enable = omap_crtc_enable, - .disable = omap_crtc_disable, - .set_timings = omap_crtc_set_timings, - .set_lcd_config = omap_crtc_set_lcd_config, - .register_framedone_handler = omap_crtc_register_framedone_handler, - .unregister_framedone_handler = omap_crtc_unregister_framedone_handler, + .connect = omap_crtc_connect, + .disconnect = omap_crtc_disconnect, + .start_update = omap_crtc_start_update, + .enable = omap_crtc_enable, + .disable = omap_crtc_disable, + .set_timings = omap_crtc_set_timings, + .set_lcd_config = omap_crtc_set_lcd_config, + .register_framedone_handler = omap_crtc_register_framedone_handler, + .unregister_framedone_handler = omap_crtc_unregister_framedone_handler, }; -/* - * CRTC funcs: +/* 
----------------------------------------------------------------------------- + * Apply Logic + */ + +static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) +{ + struct omap_crtc *omap_crtc = + container_of(irq, struct omap_crtc, error_irq); + + if (omap_crtc->ignore_digit_sync_lost) { + irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT; + if (!irqstatus) + return; + } + + DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus); +} + +static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus) +{ + struct omap_crtc *omap_crtc = + container_of(irq, struct omap_crtc, apply_irq); + struct drm_crtc *crtc = &omap_crtc->base; + + if (!dispc_mgr_go_busy(omap_crtc->channel)) { + struct omap_drm_private *priv = + crtc->dev->dev_private; + DBG("%s: apply done", omap_crtc->name); + __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq); + queue_work(priv->wq, &omap_crtc->apply_work); + } +} + +static void apply_worker(struct work_struct *work) +{ + struct omap_crtc *omap_crtc = + container_of(work, struct omap_crtc, apply_work); + struct drm_crtc *crtc = &omap_crtc->base; + struct drm_device *dev = crtc->dev; + struct omap_drm_apply *apply, *n; + bool need_apply; + + /* + * Synchronize everything on mode_config.mutex, to keep + * the callbacks and list modification all serialized + * with respect to modesetting ioctls from userspace. + */ + drm_modeset_lock(&crtc->mutex, NULL); + dispc_runtime_get(); + + /* + * If we are still pending a previous update, wait.. when the + * pending update completes, we get kicked again. + */ + if (omap_crtc->apply_irq.registered) + goto out; + + /* finish up previous apply's: */ + list_for_each_entry_safe(apply, n, + &omap_crtc->pending_applies, pending_node) { + apply->post_apply(apply); + list_del(&apply->pending_node); + } + + need_apply = !list_empty(&omap_crtc->queued_applies); + + /* then handle the next round of of queued apply's: */ + list_for_each_entry_safe(apply, n, + &omap_crtc->queued_applies, queued_node) { + apply->pre_apply(apply); + list_del(&apply->queued_node); + apply->queued = false; + list_add_tail(&apply->pending_node, + &omap_crtc->pending_applies); + } + + if (need_apply) { + enum omap_channel channel = omap_crtc->channel; + + DBG("%s: GO", omap_crtc->name); + + if (dispc_mgr_is_enabled(channel)) { + dispc_mgr_go(channel); + omap_irq_register(dev, &omap_crtc->apply_irq); + } else { + struct omap_drm_private *priv = dev->dev_private; + queue_work(priv->wq, &omap_crtc->apply_work); + } + } + +out: + dispc_runtime_put(); + drm_modeset_unlock(&crtc->mutex); +} + +int omap_crtc_apply(struct drm_crtc *crtc, + struct omap_drm_apply *apply) +{ + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); + + /* no need to queue it again if it is already queued: */ + if (apply->queued) + return 0; + + apply->queued = true; + list_add_tail(&apply->queued_node, &omap_crtc->queued_applies); + + /* + * If there are no currently pending updates, then go ahead and + * kick the worker immediately, otherwise it will run again when + * the current update finishes. 
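omap_crtc_apply() above only queues work; the pre/post callbacks run from apply_worker() around the GO pulse. A hypothetical client, with the struct omap_drm_apply usage inferred from the worker code (the caller must hold the CRTC modeset lock, as the WARN_ON documents):

    static void my_pre_apply(struct omap_drm_apply *apply)
    {
            /* program shadow registers for this update */
    }

    static void my_post_apply(struct omap_drm_apply *apply)
    {
            /* drop buffer references once the update has latched */
    }

    static int queue_my_update(struct drm_crtc *crtc,
                               struct omap_drm_apply *apply)
    {
            apply->pre_apply = my_pre_apply;
            apply->post_apply = my_post_apply;
            return omap_crtc_apply(crtc, apply); /* runs at the next GO */
    }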
+ */ + if (list_empty(&omap_crtc->pending_applies)) { + struct omap_drm_private *priv = crtc->dev->dev_private; + queue_work(priv->wq, &omap_crtc->apply_work); + } + + return 0; +} + +static void omap_crtc_pre_apply(struct omap_drm_apply *apply) +{ + struct omap_crtc *omap_crtc = + container_of(apply, struct omap_crtc, apply); + struct drm_crtc *crtc = &omap_crtc->base; + struct omap_drm_private *priv = crtc->dev->dev_private; + struct drm_encoder *encoder = NULL; + unsigned int i; + + DBG("%s: enabled=%d", omap_crtc->name, omap_crtc->enabled); + + for (i = 0; i < priv->num_encoders; i++) { + if (priv->encoders[i]->crtc == crtc) { + encoder = priv->encoders[i]; + break; + } + } + + if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder) + omap_encoder_set_enabled(omap_crtc->current_encoder, false); + + omap_crtc->current_encoder = encoder; + + if (!omap_crtc->enabled) { + if (encoder) + omap_encoder_set_enabled(encoder, false); + } else { + if (encoder) { + omap_encoder_set_enabled(encoder, false); + omap_encoder_update(encoder, omap_crtc->mgr, + &omap_crtc->timings); + omap_encoder_set_enabled(encoder, true); + } + } +} + +static void omap_crtc_post_apply(struct omap_drm_apply *apply) +{ + /* nothing needed for post-apply */ +} + +void omap_crtc_flush(struct drm_crtc *crtc) +{ + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + int loops = 0; + + while (!list_empty(&omap_crtc->pending_applies) || + !list_empty(&omap_crtc->queued_applies) || + omap_crtc->event || omap_crtc->old_fb) { + + if (++loops > 10) { + dev_err(crtc->dev->dev, + "omap_crtc_flush() timeout\n"); + break; + } + + schedule_timeout_uninterruptible(msecs_to_jiffies(20)); + } +} + +/* ----------------------------------------------------------------------------- + * CRTC Functions */ static void omap_crtc_destroy(struct drm_crtc *crtc) @@ -214,17 +471,13 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode) if (enabled != omap_crtc->enabled) { omap_crtc->enabled = enabled; - omap_crtc->full_update = true; omap_crtc_apply(crtc, &omap_crtc->apply); - /* also enable our private plane: */ - WARN_ON(omap_plane_dpms(omap_crtc->plane, mode)); - - /* and any attached overlay planes: */ + /* Enable/disable all planes associated with the CRTC. */ for (i = 0; i < priv->num_planes; i++) { struct drm_plane *plane = priv->planes[i]; if (plane->crtc == crtc) - WARN_ON(omap_plane_dpms(plane, mode)); + WARN_ON(omap_plane_set_enable(plane, enabled)); } } } @@ -256,13 +509,17 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc, mode->type, mode->flags); copy_timings_drm_to_omap(&omap_crtc->timings, mode); - omap_crtc->full_update = true; - return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16, - NULL, NULL); + /* + * The primary plane CRTC can be reset if the plane is disabled directly + * through the universal plane API. Set it again here. 
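omap_crtc_flush() above polls rather than blocking on an event; its bound is worth spelling out:

    /* omap_crtc_flush() sleeps in 20 ms steps and gives up after ten
     * passes, so a stuck pipeline stalls the caller for at most about
     * 10 * 20 ms = 200 ms before the timeout message is printed. */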
+ */ + crtc->primary->crtc = crtc; + + return omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + x, y, mode->hdisplay, mode->vdisplay, + NULL, NULL); } static void omap_crtc_prepare(struct drm_crtc *crtc) @@ -282,15 +539,13 @@ static void omap_crtc_commit(struct drm_crtc *crtc) static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - struct drm_plane *plane = omap_crtc->plane; + struct drm_plane *plane = crtc->primary; struct drm_display_mode *mode = &crtc->mode; return omap_plane_mode_set(plane, crtc, crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16, - NULL, NULL); + 0, 0, mode->hdisplay, mode->vdisplay, + x, y, mode->hdisplay, mode->vdisplay, + NULL, NULL); } static void vblank_cb(void *arg) @@ -299,6 +554,7 @@ static void vblank_cb(void *arg) struct drm_device *dev = crtc->dev; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); unsigned long flags; + struct drm_framebuffer *fb; spin_lock_irqsave(&dev->event_lock, flags); @@ -306,10 +562,15 @@ static void vblank_cb(void *arg) if (omap_crtc->event) drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event); + fb = omap_crtc->old_fb; + omap_crtc->event = NULL; omap_crtc->old_fb = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); + + if (fb) + drm_framebuffer_unreference(fb); } static void page_flip_worker(struct work_struct *work) @@ -321,11 +582,10 @@ static void page_flip_worker(struct work_struct *work) struct drm_gem_object *bo; drm_modeset_lock(&crtc->mutex, NULL); - omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - crtc->x << 16, crtc->y << 16, - mode->hdisplay << 16, mode->vdisplay << 16, - vblank_cb, crtc); + omap_plane_mode_set(crtc->primary, crtc, crtc->primary->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + crtc->x, crtc->y, mode->hdisplay, mode->vdisplay, + vblank_cb, crtc); drm_modeset_unlock(&crtc->mutex); bo = omap_framebuffer_bo(crtc->primary->fb, 0); @@ -361,11 +621,12 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, if (omap_crtc->old_fb) { spin_unlock_irqrestore(&dev->event_lock, flags); dev_err(dev->dev, "already a pending flip\n"); - return -EINVAL; + return -EBUSY; } omap_crtc->event = event; omap_crtc->old_fb = primary->fb = fb; + drm_framebuffer_reference(omap_crtc->old_fb); spin_unlock_irqrestore(&dev->event_lock, flags); @@ -385,7 +646,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc, static int omap_crtc_set_property(struct drm_crtc *crtc, struct drm_property *property, uint64_t val) { - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); struct omap_drm_private *priv = crtc->dev->dev_private; if (property == priv->rotation_prop) { @@ -393,7 +653,7 @@ static int omap_crtc_set_property(struct drm_crtc *crtc, !!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270))); } - return omap_plane_set_property(omap_crtc->plane, property, val); + return omap_plane_set_property(crtc->primary, property, val); } static const struct drm_crtc_funcs omap_crtc_funcs = { @@ -412,256 +672,15 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_base = omap_crtc_mode_set_base, }; -const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc) -{ - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - return &omap_crtc->timings; -} - -enum omap_channel omap_crtc_channel(struct 
drm_crtc *crtc) -{ - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - return omap_crtc->channel; -} - -static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) -{ - struct omap_crtc *omap_crtc = - container_of(irq, struct omap_crtc, error_irq); - struct drm_crtc *crtc = &omap_crtc->base; - DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus); - /* avoid getting in a flood, unregister the irq until next vblank */ - __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); -} - -static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus) -{ - struct omap_crtc *omap_crtc = - container_of(irq, struct omap_crtc, apply_irq); - struct drm_crtc *crtc = &omap_crtc->base; - - if (!omap_crtc->error_irq.registered) - __omap_irq_register(crtc->dev, &omap_crtc->error_irq); - - if (!dispc_mgr_go_busy(omap_crtc->channel)) { - struct omap_drm_private *priv = - crtc->dev->dev_private; - DBG("%s: apply done", omap_crtc->name); - __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq); - queue_work(priv->wq, &omap_crtc->apply_work); - } -} - -static void apply_worker(struct work_struct *work) -{ - struct omap_crtc *omap_crtc = - container_of(work, struct omap_crtc, apply_work); - struct drm_crtc *crtc = &omap_crtc->base; - struct drm_device *dev = crtc->dev; - struct omap_drm_apply *apply, *n; - bool need_apply; - - /* - * Synchronize everything on mode_config.mutex, to keep - * the callbacks and list modification all serialized - * with respect to modesetting ioctls from userspace. - */ - drm_modeset_lock(&crtc->mutex, NULL); - dispc_runtime_get(); - - /* - * If we are still pending a previous update, wait.. when the - * pending update completes, we get kicked again. - */ - if (omap_crtc->apply_irq.registered) - goto out; - - /* finish up previous apply's: */ - list_for_each_entry_safe(apply, n, - &omap_crtc->pending_applies, pending_node) { - apply->post_apply(apply); - list_del(&apply->pending_node); - } - - need_apply = !list_empty(&omap_crtc->queued_applies); - - /* then handle the next round of of queued apply's: */ - list_for_each_entry_safe(apply, n, - &omap_crtc->queued_applies, queued_node) { - apply->pre_apply(apply); - list_del(&apply->queued_node); - apply->queued = false; - list_add_tail(&apply->pending_node, - &omap_crtc->pending_applies); - } - - if (need_apply) { - enum omap_channel channel = omap_crtc->channel; - - DBG("%s: GO", omap_crtc->name); - - if (dispc_mgr_is_enabled(channel)) { - omap_irq_register(dev, &omap_crtc->apply_irq); - dispc_mgr_go(channel); - } else { - struct omap_drm_private *priv = dev->dev_private; - queue_work(priv->wq, &omap_crtc->apply_work); - } - } - -out: - dispc_runtime_put(); - drm_modeset_unlock(&crtc->mutex); -} - -int omap_crtc_apply(struct drm_crtc *crtc, - struct omap_drm_apply *apply) -{ - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - - WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); - - /* no need to queue it again if it is already queued: */ - if (apply->queued) - return 0; - - apply->queued = true; - list_add_tail(&apply->queued_node, &omap_crtc->queued_applies); - - /* - * If there are no currently pending updates, then go ahead and - * kick the worker immediately, otherwise it will run again when - * the current update finishes. 
- */ - if (list_empty(&omap_crtc->pending_applies)) { - struct omap_drm_private *priv = crtc->dev->dev_private; - queue_work(priv->wq, &omap_crtc->apply_work); - } - - return 0; -} - -/* called only from apply */ -static void set_enabled(struct drm_crtc *crtc, bool enable) -{ - struct drm_device *dev = crtc->dev; - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - enum omap_channel channel = omap_crtc->channel; - struct omap_irq_wait *wait; - u32 framedone_irq, vsync_irq; - int ret; - - if (dispc_mgr_is_enabled(channel) == enable) - return; - - /* - * Digit output produces some sync lost interrupts during the first - * frame when enabling, so we need to ignore those. - */ - omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); - - framedone_irq = dispc_mgr_get_framedone_irq(channel); - vsync_irq = dispc_mgr_get_vsync_irq(channel); - - if (enable) { - wait = omap_irq_wait_init(dev, vsync_irq, 1); - } else { - /* - * When we disable the digit output, we need to wait for - * FRAMEDONE to know that DISPC has finished with the output. - * - * OMAP2/3 does not have FRAMEDONE irq for digit output, and in - * that case we need to use vsync interrupt, and wait for both - * even and odd frames. - */ - - if (framedone_irq) - wait = omap_irq_wait_init(dev, framedone_irq, 1); - else - wait = omap_irq_wait_init(dev, vsync_irq, 2); - } - - dispc_mgr_enable(channel, enable); - - ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); - if (ret) { - dev_err(dev->dev, "%s: timeout waiting for %s\n", - omap_crtc->name, enable ? "enable" : "disable"); - } - - omap_irq_register(crtc->dev, &omap_crtc->error_irq); -} - -static void omap_crtc_pre_apply(struct omap_drm_apply *apply) -{ - struct omap_crtc *omap_crtc = - container_of(apply, struct omap_crtc, apply); - struct drm_crtc *crtc = &omap_crtc->base; - struct drm_encoder *encoder = NULL; - - DBG("%s: enabled=%d, full=%d", omap_crtc->name, - omap_crtc->enabled, omap_crtc->full_update); - - if (omap_crtc->full_update) { - struct omap_drm_private *priv = crtc->dev->dev_private; - int i; - for (i = 0; i < priv->num_encoders; i++) { - if (priv->encoders[i]->crtc == crtc) { - encoder = priv->encoders[i]; - break; - } - } - } - - if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder) - omap_encoder_set_enabled(omap_crtc->current_encoder, false); - - omap_crtc->current_encoder = encoder; - - if (!omap_crtc->enabled) { - if (encoder) - omap_encoder_set_enabled(encoder, false); - } else { - if (encoder) { - omap_encoder_set_enabled(encoder, false); - omap_encoder_update(encoder, omap_crtc->mgr, - &omap_crtc->timings); - omap_encoder_set_enabled(encoder, true); - } - } - - omap_crtc->full_update = false; -} - -static void omap_crtc_post_apply(struct omap_drm_apply *apply) -{ - /* nothing needed for post-apply */ -} - -void omap_crtc_flush(struct drm_crtc *crtc) -{ - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - int loops = 0; - - while (!list_empty(&omap_crtc->pending_applies) || - !list_empty(&omap_crtc->queued_applies) || - omap_crtc->event || omap_crtc->old_fb) { - - if (++loops > 10) { - dev_err(crtc->dev->dev, - "omap_crtc_flush() timeout\n"); - break; - } - - schedule_timeout_uninterruptible(msecs_to_jiffies(20)); - } -} +/* ----------------------------------------------------------------------------- + * Init and Cleanup + */ static const char *channel_names[] = { - [OMAP_DSS_CHANNEL_LCD] = "lcd", - [OMAP_DSS_CHANNEL_DIGIT] = "tv", - [OMAP_DSS_CHANNEL_LCD2] = "lcd2", - [OMAP_DSS_CHANNEL_LCD3] = "lcd3", + [OMAP_DSS_CHANNEL_LCD] = "lcd", 
+ [OMAP_DSS_CHANNEL_DIGIT] = "tv", + [OMAP_DSS_CHANNEL_LCD2] = "lcd2", + [OMAP_DSS_CHANNEL_LCD3] = "lcd3", }; void omap_crtc_pre_init(void) @@ -681,12 +700,13 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct drm_crtc *crtc = NULL; struct omap_crtc *omap_crtc; struct omap_overlay_manager_info *info; + int ret; DBG("%s", channel_names[channel]); omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL); if (!omap_crtc) - goto fail; + return NULL; crtc = &omap_crtc->base; @@ -700,8 +720,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, omap_crtc->apply.post_apply = omap_crtc_post_apply; omap_crtc->channel = channel; - omap_crtc->plane = plane; - omap_crtc->plane->crtc = crtc; omap_crtc->name = channel_names[channel]; omap_crtc->pipe = id; @@ -723,18 +741,18 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST; info->trans_enabled = false; - drm_crtc_init(dev, crtc, &omap_crtc_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &omap_crtc_funcs); + if (ret < 0) { + kfree(omap_crtc); + return NULL; + } + drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); - omap_plane_install_properties(omap_crtc->plane, &crtc->base); + omap_plane_install_properties(crtc->primary, &crtc->base); omap_crtcs[channel] = omap_crtc; return crtc; - -fail: - if (crtc) - omap_crtc_destroy(crtc); - - return NULL; } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h index 58bcd6a..9f32a83 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h @@ -148,11 +148,15 @@ struct refill_engine { bool async; - wait_queue_head_t wait_for_refill; + struct completion compl; struct list_head idle_node; }; +struct dmm_platform_data { + uint32_t cpu_cache_flags; +}; + struct dmm { struct device *dev; void __iomem *base; @@ -183,6 +187,8 @@ struct dmm { /* allocation list and lock */ struct list_head alloc_head; + + const struct dmm_platform_data *plat_data; }; #endif diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 56c6055..042038e 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -29,6 +29,7 @@ #include <linux/mm.h> #include <linux/time.h> #include <linux/list.h> +#include <linux/completion.h> #include "omap_dmm_tiler.h" #include "omap_dmm_priv.h" @@ -39,6 +40,10 @@ static struct tcm *containers[TILFMT_NFORMATS]; static struct dmm *omap_dmm; +#if defined(CONFIG_OF) +static const struct of_device_id dmm_of_match[]; +#endif + /* global spinlock for protecting lists */ static DEFINE_SPINLOCK(list_lock); @@ -58,19 +63,19 @@ static const struct { uint32_t slot_w; /* width of each slot (in pixels) */ uint32_t slot_h; /* height of each slot (in pixels) */ } geom[TILFMT_NFORMATS] = { - [TILFMT_8BIT] = GEOM(0, 0, 1), - [TILFMT_16BIT] = GEOM(0, 1, 2), - [TILFMT_32BIT] = GEOM(1, 1, 4), - [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1), + [TILFMT_8BIT] = GEOM(0, 0, 1), + [TILFMT_16BIT] = GEOM(0, 1, 2), + [TILFMT_32BIT] = GEOM(1, 1, 4), + [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1), }; /* lookup table for registers w/ per-engine instances */ static const uint32_t reg[][4] = { - [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1, - DMM_PAT_STATUS__2, DMM_PAT_STATUS__3}, - [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1, - DMM_PAT_DESCR__2, DMM_PAT_DESCR__3}, + [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1, + DMM_PAT_STATUS__2, 
DMM_PAT_STATUS__3}, + [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1, + DMM_PAT_DESCR__2, DMM_PAT_DESCR__3}, }; /* simple allocator to grab next 16 byte aligned memory from txn */ @@ -142,10 +147,10 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg) for (i = 0; i < dmm->num_engines; i++) { if (status & DMM_IRQSTAT_LST) { - wake_up_interruptible(&dmm->engines[i].wait_for_refill); - if (dmm->engines[i].async) release_engine(&dmm->engines[i]); + + complete(&dmm->engines[i].compl); } status >>= 8; @@ -269,15 +274,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) /* mark whether it is async to denote list management in IRQ handler */ engine->async = wait ? false : true; + reinit_completion(&engine->compl); + /* verify that the irq handler sees the 'async' and completion value */ + smp_mb(); /* kick reload */ writel(engine->refill_pa, dmm->base + reg[PAT_DESCR][engine->id]); if (wait) { - if (wait_event_interruptible_timeout(engine->wait_for_refill, - wait_status(engine, DMM_PATSTATUS_READY) == 0, - msecs_to_jiffies(1)) <= 0) { + if (!wait_for_completion_timeout(&engine->compl, + msecs_to_jiffies(1))) { dev_err(dmm->dev, "timed out waiting for done\n"); ret = -ETIMEDOUT; } @@ -529,6 +536,11 @@ size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h) return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h; } +uint32_t tiler_get_cpu_cache_flags(void) +{ + return omap_dmm->plat_data->cpu_cache_flags; +} + bool dmm_is_available(void) { return omap_dmm ? true : false; @@ -592,6 +604,18 @@ static int omap_dmm_probe(struct platform_device *dev) init_waitqueue_head(&omap_dmm->engine_queue); + if (dev->dev.of_node) { + const struct of_device_id *match; + + match = of_match_node(dmm_of_match, dev->dev.of_node); + if (!match) { + dev_err(&dev->dev, "failed to find matching device node\n"); + return -ENODEV; + } + + omap_dmm->plat_data = match->data; + } + /* lookup hwmod data - base address and irq */ mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) { @@ -696,7 +720,7 @@ static int omap_dmm_probe(struct platform_device *dev) (REFILL_BUFFER_SIZE * i); omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa + (REFILL_BUFFER_SIZE * i); - init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill); + init_completion(&omap_dmm->engines[i].compl); list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head); } @@ -941,7 +965,7 @@ error: } #endif -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int omap_dmm_resume(struct device *dev) { struct tcm_area area; @@ -965,16 +989,28 @@ static int omap_dmm_resume(struct device *dev) return 0; } - -static const struct dev_pm_ops omap_dmm_pm_ops = { - .resume = omap_dmm_resume, -}; #endif +static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume); + #if defined(CONFIG_OF) +static const struct dmm_platform_data dmm_omap4_platform_data = { + .cpu_cache_flags = OMAP_BO_WC, +}; + +static const struct dmm_platform_data dmm_omap5_platform_data = { + .cpu_cache_flags = OMAP_BO_UNCACHED, +}; + static const struct of_device_id dmm_of_match[] = { - { .compatible = "ti,omap4-dmm", }, - { .compatible = "ti,omap5-dmm", }, + { + .compatible = "ti,omap4-dmm", + .data = &dmm_omap4_platform_data, + }, + { + .compatible = "ti,omap5-dmm", + .data = &dmm_omap5_platform_data, + }, {}, }; #endif @@ -986,9 +1022,7 @@ struct platform_driver omap_dmm_driver = { .owner = THIS_MODULE, .name = DMM_DRIVER_NAME, .of_match_table = of_match_ptr(dmm_of_match), -#ifdef CONFIG_PM .pm = &omap_dmm_pm_ops, -#endif }, }; diff --git 
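The omap_dmm_tiler.c change above replaces an open-coded waitqueue with the kernel completion API. A minimal sketch of that handshake, using hypothetical names rather than the driver's own symbols:

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>

    struct engine {
            struct completion compl;
    };

    /* submit path: re-arm the completion, kick the hardware, then wait */
    static int submit_and_wait(struct engine *e)
    {
            reinit_completion(&e->compl);
            /* barrier so the IRQ handler sees the re-armed completion */
            smp_mb();
            /* ... write the descriptor address to start the engine ... */
            if (!wait_for_completion_timeout(&e->compl, msecs_to_jiffies(1)))
                    return -ETIMEDOUT;      /* a zero return means timeout */
            return 0;
    }

    /* IRQ path: signal the single pending waiter */
    static irqreturn_t engine_irq(int irq, void *arg)
    {
            struct engine *e = arg;

            complete(&e->compl);
            return IRQ_HANDLED;
    }

One practical difference from the old wait_event_interruptible_timeout() code: wait_for_completion_timeout() is not interruptible by signals, so a pending signal can no longer be misreported as "timed out waiting for done" while the engine is still busy.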
a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 4fdd61e..e83c783 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -106,6 +106,7 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient); size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h); size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h); void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h); +uint32_t tiler_get_cpu_cache_flags(void); bool dmm_is_available(void); extern struct platform_driver omap_dmm_driver; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 8241ed9..94920d4 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -128,6 +128,29 @@ cleanup: return r; } +static int omap_modeset_create_crtc(struct drm_device *dev, int id, + enum omap_channel channel) +{ + struct omap_drm_private *priv = dev->dev_private; + struct drm_plane *plane; + struct drm_crtc *crtc; + + plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(plane)) + return PTR_ERR(plane); + + crtc = omap_crtc_init(dev, plane, channel, id); + + BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs)); + priv->crtcs[id] = crtc; + priv->num_crtcs++; + + priv->planes[id] = plane; + priv->num_planes++; + + return 0; +} + static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; @@ -136,6 +159,7 @@ static int omap_modeset_init(struct drm_device *dev) int num_mgrs = dss_feat_get_num_mgrs(); int num_crtcs; int i, id = 0; + int ret; drm_mode_config_init(dev); @@ -209,18 +233,13 @@ static int omap_modeset_init(struct drm_device *dev) * allocated crtc, we create a new crtc for it */ if (!channel_used(dev, channel)) { - struct drm_plane *plane; - struct drm_crtc *crtc; - - plane = omap_plane_init(dev, id, true); - crtc = omap_crtc_init(dev, plane, channel, id); - - BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs)); - priv->crtcs[id] = crtc; - priv->num_crtcs++; - - priv->planes[id] = plane; - priv->num_planes++; + ret = omap_modeset_create_crtc(dev, id, channel); + if (ret < 0) { + dev_err(dev->dev, + "could not create CRTC (channel %u)\n", + channel); + return ret; + } id++; } @@ -234,26 +253,8 @@ static int omap_modeset_init(struct drm_device *dev) /* find a free manager for this crtc */ for (i = 0; i < num_mgrs; i++) { - if (!channel_used(dev, i)) { - struct drm_plane *plane; - struct drm_crtc *crtc; - - plane = omap_plane_init(dev, id, true); - crtc = omap_crtc_init(dev, plane, i, id); - - BUG_ON(priv->num_crtcs >= - ARRAY_SIZE(priv->crtcs)); - - priv->crtcs[id] = crtc; - priv->num_crtcs++; - - priv->planes[id] = plane; - priv->num_planes++; - + if (!channel_used(dev, i)) break; - } else { - continue; - } } if (i == num_mgrs) { @@ -261,13 +262,24 @@ static int omap_modeset_init(struct drm_device *dev) dev_err(dev->dev, "no managers left for crtc\n"); return -ENOMEM; } + + ret = omap_modeset_create_crtc(dev, id, i); + if (ret < 0) { + dev_err(dev->dev, + "could not create CRTC (channel %u)\n", i); + return ret; + } } /* * Create normal planes for the remaining overlays: */ for (; id < num_ovls; id++) { - struct drm_plane *plane = omap_plane_init(dev, id, false); + struct drm_plane *plane; + + plane = omap_plane_init(dev, id, DRM_PLANE_TYPE_OVERLAY); + if (IS_ERR(plane)) + return PTR_ERR(plane); BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)); priv->planes[priv->num_planes++] = plane; @@ -286,14 +298,13 @@ 
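Note that the new omap_modeset_create_crtc() helper above leans on the kernel's ERR_PTR convention, so a failing omap_plane_init() can report why it failed instead of returning a bare NULL. The pattern in isolation, with hypothetical names:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct widget { int dummy; };

    static struct widget *widget_create(void)
    {
            struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return ERR_PTR(-ENOMEM);  /* errno travels in the pointer */
            return w;
    }

    /* caller side: decode the errno instead of testing for NULL */
    w = widget_create();
    if (IS_ERR(w))
            return PTR_ERR(w);

This pairs with the omap_plane_init() change later in this patch, which now returns ERR_PTR(-ENOMEM) on allocation failure.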
static int omap_modeset_init(struct drm_device *dev) for (id = 0; id < priv->num_crtcs; id++) { struct drm_crtc *crtc = priv->crtcs[id]; enum omap_channel crtc_channel; - enum omap_dss_output_id supported_outputs; crtc_channel = omap_crtc_channel(crtc); - supported_outputs = - dss_feat_get_supported_outputs(crtc_channel); - if (supported_outputs & output->id) + if (output->dispc_channel == crtc_channel) { encoder->possible_crtcs |= (1 << id); + break; + } } omap_dss_put_device(output); @@ -480,6 +491,7 @@ static int dev_load(struct drm_device *dev, unsigned long flags) priv->wq = alloc_ordered_workqueue("omapdrm", 0); + spin_lock_init(&priv->list_lock); INIT_LIST_HEAD(&priv->obj_list); omap_gem_init(dev); @@ -519,7 +531,8 @@ static int dev_unload(struct drm_device *dev) drm_kms_helper_poll_fini(dev); - omap_fbdev_free(dev); + if (priv->fbdev) + omap_fbdev_free(dev); /* flush crtcs so the fbs get released */ for (i = 0; i < priv->num_crtcs; i++) @@ -588,9 +601,11 @@ static void dev_lastclose(struct drm_device *dev) } } - ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); - if (ret) - DBG("failed to restore crtc mode"); + if (priv->fbdev) { + ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev); + if (ret) + DBG("failed to restore crtc mode"); + } } static void dev_preclose(struct drm_device *dev, struct drm_file *file) @@ -610,74 +625,57 @@ static const struct vm_operations_struct omap_gem_vm_ops = { }; static const struct file_operations omapdriver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .unlocked_ioctl = drm_ioctl, - .release = drm_release, - .mmap = omap_gem_mmap, - .poll = drm_poll, - .read = drm_read, - .llseek = noop_llseek, + .owner = THIS_MODULE, + .open = drm_open, + .unlocked_ioctl = drm_ioctl, + .release = drm_release, + .mmap = omap_gem_mmap, + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, }; static struct drm_driver omap_drm_driver = { - .driver_features = - DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = dev_load, - .unload = dev_unload, - .open = dev_open, - .lastclose = dev_lastclose, - .preclose = dev_preclose, - .postclose = dev_postclose, - .set_busid = drm_platform_set_busid, - .get_vblank_counter = drm_vblank_count, - .enable_vblank = omap_irq_enable_vblank, - .disable_vblank = omap_irq_disable_vblank, - .irq_preinstall = omap_irq_preinstall, - .irq_postinstall = omap_irq_postinstall, - .irq_uninstall = omap_irq_uninstall, - .irq_handler = omap_irq_handler, + .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM + | DRIVER_PRIME, + .load = dev_load, + .unload = dev_unload, + .open = dev_open, + .lastclose = dev_lastclose, + .preclose = dev_preclose, + .postclose = dev_postclose, + .set_busid = drm_platform_set_busid, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = omap_irq_enable_vblank, + .disable_vblank = omap_irq_disable_vblank, + .irq_preinstall = omap_irq_preinstall, + .irq_postinstall = omap_irq_postinstall, + .irq_uninstall = omap_irq_uninstall, + .irq_handler = omap_irq_handler, #ifdef CONFIG_DEBUG_FS - .debugfs_init = omap_debugfs_init, - .debugfs_cleanup = omap_debugfs_cleanup, + .debugfs_init = omap_debugfs_init, + .debugfs_cleanup = omap_debugfs_cleanup, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = omap_gem_prime_export, - .gem_prime_import = omap_gem_prime_import, - .gem_free_object = omap_gem_free_object, - .gem_vm_ops = &omap_gem_vm_ops, - .dumb_create = 
omap_gem_dumb_create, - .dumb_map_offset = omap_gem_dumb_map_offset, - .dumb_destroy = drm_gem_dumb_destroy, - .ioctls = ioctls, - .num_ioctls = DRM_OMAP_NUM_IOCTLS, - .fops = &omapdriver_fops, - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = omap_gem_prime_export, + .gem_prime_import = omap_gem_prime_import, + .gem_free_object = omap_gem_free_object, + .gem_vm_ops = &omap_gem_vm_ops, + .dumb_create = omap_gem_dumb_create, + .dumb_map_offset = omap_gem_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .ioctls = ioctls, + .num_ioctls = DRM_OMAP_NUM_IOCTLS, + .fops = &omapdriver_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, }; -static int pdev_suspend(struct platform_device *pDevice, pm_message_t state) -{ - DBG(""); - return 0; -} - -static int pdev_resume(struct platform_device *device) -{ - DBG(""); - return 0; -} - -static void pdev_shutdown(struct platform_device *device) -{ - DBG(""); -} - static int pdev_probe(struct platform_device *device) { int r; @@ -709,24 +707,35 @@ static int pdev_remove(struct platform_device *device) return 0; } -#ifdef CONFIG_PM -static const struct dev_pm_ops omapdrm_pm_ops = { - .resume = omap_gem_resume, -}; +#ifdef CONFIG_PM_SLEEP +static int omap_drm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + drm_kms_helper_poll_disable(drm_dev); + + return 0; +} + +static int omap_drm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + drm_kms_helper_poll_enable(drm_dev); + + return omap_gem_resume(dev); +} #endif +static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume); + static struct platform_driver pdev = { - .driver = { - .name = DRIVER_NAME, -#ifdef CONFIG_PM - .pm = &omapdrm_pm_ops, -#endif - }, - .probe = pdev_probe, - .remove = pdev_remove, - .suspend = pdev_suspend, - .resume = pdev_resume, - .shutdown = pdev_shutdown, + .driver = { + .name = DRIVER_NAME, + .pm = &omapdrm_pm_ops, + }, + .probe = pdev_probe, + .remove = pdev_remove, }; static int __init omap_drm_init(void) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 60e47b3..b31c79f 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -105,6 +105,9 @@ struct omap_drm_private { struct workqueue_struct *wq; + /* lock for obj_list below */ + spinlock_t list_lock; + /* list of GEM objects: */ struct list_head obj_list; @@ -160,15 +163,15 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, void omap_crtc_flush(struct drm_crtc *crtc); struct drm_plane *omap_plane_init(struct drm_device *dev, - int plane_id, bool private_plane); -int omap_plane_dpms(struct drm_plane *plane, int mode); + int id, enum drm_plane_type type); +int omap_plane_set_enable(struct drm_plane *plane, bool enable); int omap_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - void (*fxn)(void *), void *arg); + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + 
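Both PM conversions in this patch (here and in omap_dmm_tiler.c above) rely on the same property of SIMPLE_DEV_PM_OPS(): the dev_pm_ops structure is always defined, while the callbacks are only wired up when CONFIG_PM_SLEEP is set, so the .pm assignment in the platform driver no longer needs its own #ifdef. Roughly, and simplified from include/linux/pm.h, the macro expands to:

    static const struct dev_pm_ops omapdrm_pm_ops = {
            /* populated only under CONFIG_PM_SLEEP: */
            .suspend  = omap_drm_suspend,
            .resume   = omap_drm_resume,
            .freeze   = omap_drm_suspend,
            .thaw     = omap_drm_resume,
            .poweroff = omap_drm_suspend,
            .restore  = omap_drm_resume,
    };

With CONFIG_PM_SLEEP disabled the structure is empty, which is also why the callback definitions themselves stay inside an #ifdef CONFIG_PM_SLEEP block: they would otherwise trigger defined-but-unused warnings.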
unsigned int src_x, unsigned int src_y, + unsigned int src_w, unsigned int src_h, + void (*fxn)(void *), void *arg); void omap_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); int omap_plane_set_property(struct drm_plane *plane, @@ -186,8 +189,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, struct drm_encoder *encoder); struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); -void omap_connector_flush(struct drm_connector *connector, - int x, int y, int w, int h); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); void copy_timings_omap_to_drm(struct drm_display_mode *mode, @@ -208,8 +209,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, struct omap_drm_window *win, struct omap_overlay_info *info); struct drm_connector *omap_framebuffer_get_next_connector( struct drm_framebuffer *fb, struct drm_connector *from); -void omap_framebuffer_flush(struct drm_framebuffer *fb, - int x, int y, int w, int h); void omap_gem_init(struct drm_device *dev); void omap_gem_deinit(struct drm_device *dev); diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 2a5cacd..b2c1a29 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -86,6 +86,7 @@ struct plane { struct omap_framebuffer { struct drm_framebuffer base; + int pin_count; const struct format *format; struct plane planes[4]; }; @@ -121,18 +122,6 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { - int i; - - drm_modeset_lock_all(fb->dev); - - for (i = 0; i < num_clips; i++) { - omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1, - clips[i].x2 - clips[i].x1, - clips[i].y2 - clips[i].y1); - } - - drm_modeset_unlock_all(fb->dev); - return 0; } @@ -261,6 +250,11 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb) struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); int ret, i, n = drm_format_num_planes(fb->pixel_format); + if (omap_fb->pin_count > 0) { + omap_fb->pin_count++; + return 0; + } + for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true); @@ -269,6 +263,8 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb) omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE); } + omap_fb->pin_count++; + return 0; fail: @@ -287,6 +283,11 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb) struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); int ret, i, n = drm_format_num_planes(fb->pixel_format); + omap_fb->pin_count--; + + if (omap_fb->pin_count > 0) + return 0; + for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; ret = omap_gem_put_paddr(plane->bo); @@ -336,34 +337,6 @@ struct drm_connector *omap_framebuffer_get_next_connector( return NULL; } -/* flush an area of the framebuffer (in case of manual update display that - * is not automatically flushed) - */ -void omap_framebuffer_flush(struct drm_framebuffer *fb, - int x, int y, int w, int h) -{ - struct drm_connector *connector = NULL; - - VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb); - - /* FIXME: This is racy - no protection against modeset config changes. 
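The pin_count added to struct omap_framebuffer above turns omap_framebuffer_pin()/unpin() into a refcount: only the first pin maps the planes and only the matching last unpin releases them, so a framebuffer with several users is never unmapped while still scanned out. Reduced to its core (hypothetical names, map_all_planes()/unmap_all_planes() stand in for the per-plane paddr handling):

    struct fb {
            int pin_count;
    };

    int map_all_planes(struct fb *fb);      /* expensive first-pin work */
    void unmap_all_planes(struct fb *fb);

    int fb_pin(struct fb *fb)
    {
            int ret;

            if (fb->pin_count > 0) {        /* already mapped, just count */
                    fb->pin_count++;
                    return 0;
            }

            ret = map_all_planes(fb);
            if (ret)
                    return ret;

            fb->pin_count++;
            return 0;
    }

    void fb_unpin(struct fb *fb)
    {
            if (--fb->pin_count > 0)        /* other users remain */
                    return;
            unmap_all_planes(fb);           /* last reference dropped */
    }

There is deliberately no locking in the sketch; as a comment added later in this patch notes, pin/unpin are always called from the driver's ordered workqueue, which serializes them.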
*/ - while ((connector = omap_framebuffer_get_next_connector(fb, connector))) { - /* only consider connectors that are part of a chain */ - if (connector->encoder && connector->encoder->crtc) { - /* TODO: maybe this should propagate thru the crtc who - * could do the coordinate translation.. - */ - struct drm_crtc *crtc = connector->encoder->crtc; - int cx = max(0, x - crtc->x); - int cy = max(0, y - crtc->y); - int cw = w + (x - crtc->x) - cx; - int ch = h + (y - crtc->y) - cy; - - omap_connector_flush(connector, cx, cy, cw, ch); - } - } -} - #ifdef CONFIG_DEBUG_FS void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { @@ -407,7 +380,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { - struct omap_framebuffer *omap_fb; + struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; const struct format *format = NULL; int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); @@ -450,6 +423,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, goto fail; } + if (pitch % format->planes[i].stride_bpp != 0) { + dev_err(dev->dev, + "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n", + pitch, format->planes[i].stride_bpp); + ret = -EINVAL; + goto fail; + } + size = pitch * mode_cmd->height / format->planes[i].sub_y; if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) { @@ -478,8 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, return fb; fail: - if (fb) - omap_framebuffer_destroy(fb); + kfree(omap_fb); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index d292d24..950cd33 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -42,42 +42,8 @@ struct omap_fbdev { struct work_struct work; }; -static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h); static struct drm_fb_helper *get_fb(struct fb_info *fbi); -static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t res; - - res = fb_sys_write(fbi, buf, count, ppos); - omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres); - - return res; -} - -static void omap_fbdev_fillrect(struct fb_info *fbi, - const struct fb_fillrect *rect) -{ - sys_fillrect(fbi, rect); - omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height); -} - -static void omap_fbdev_copyarea(struct fb_info *fbi, - const struct fb_copyarea *area) -{ - sys_copyarea(fbi, area); - omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height); -} - -static void omap_fbdev_imageblit(struct fb_info *fbi, - const struct fb_image *image) -{ - sys_imageblit(fbi, image); - omap_fbdev_flush(fbi, image->dx, image->dy, - image->width, image->height); -} - static void pan_worker(struct work_struct *work) { struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); @@ -121,10 +87,10 @@ static struct fb_ops omap_fb_ops = { * basic fbdev ops which write to the framebuffer */ .fb_read = fb_sys_read, - .fb_write = omap_fbdev_write, - .fb_fillrect = omap_fbdev_fillrect, - .fb_copyarea = omap_fbdev_copyarea, - .fb_imageblit = omap_fbdev_imageblit, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, .fb_check_var = 
drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, @@ -294,21 +260,6 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi) return fbi->par; } -/* flush an area of the framebuffer (in case of manual update display that - * is not automatically flushed) - */ -static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h) -{ - struct drm_fb_helper *helper = get_fb(fbi); - - if (!helper) - return; - - VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi); - - omap_framebuffer_flush(helper->fb, x, y, w, h); -} - /* initialize fbdev helper */ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) { diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index aeb91ed..e9718b9 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -828,6 +828,7 @@ int omap_gem_put_paddr(struct drm_gem_object *obj) dev_err(obj->dev->dev, "could not release unmap: %d\n", ret); } + omap_obj->paddr = 0; omap_obj->block = NULL; } } @@ -1272,13 +1273,16 @@ unlock: void omap_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct omap_drm_private *priv = dev->dev_private; struct omap_gem_object *omap_obj = to_omap_bo(obj); evict(obj); WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + spin_lock(&priv->list_lock); list_del(&omap_obj->mm_list); + spin_unlock(&priv->list_lock); drm_gem_free_mmap_offset(obj); @@ -1358,8 +1362,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, /* currently don't allow cached buffers.. there is some caching * stuff that needs to be handled better */ - flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED); - flags |= OMAP_BO_WC; + flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED); + flags |= tiler_get_cpu_cache_flags(); /* align dimensions to slot boundaries... 
*/ tiler_align(gem2fmt(flags), @@ -1376,7 +1380,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, if (!omap_obj) goto fail; + spin_lock(&priv->list_lock); list_add(&omap_obj->mm_list, &priv->obj_list); + spin_unlock(&priv->list_lock); obj = &omap_obj->base; diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index a2dbfb1..b46dabd 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -156,16 +156,16 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, } static struct dma_buf_ops omap_dmabuf_ops = { - .map_dma_buf = omap_gem_map_dma_buf, - .unmap_dma_buf = omap_gem_unmap_dma_buf, - .release = omap_gem_dmabuf_release, - .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, - .end_cpu_access = omap_gem_dmabuf_end_cpu_access, - .kmap_atomic = omap_gem_dmabuf_kmap_atomic, - .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic, - .kmap = omap_gem_dmabuf_kmap, - .kunmap = omap_gem_dmabuf_kunmap, - .mmap = omap_gem_dmabuf_mmap, + .map_dma_buf = omap_gem_map_dma_buf, + .unmap_dma_buf = omap_gem_unmap_dma_buf, + .release = omap_gem_dmabuf_release, + .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, + .end_cpu_access = omap_gem_dmabuf_end_cpu_access, + .kmap_atomic = omap_gem_dmabuf_kmap_atomic, + .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic, + .kmap = omap_gem_dmabuf_kmap, + .kunmap = omap_gem_dmabuf_kunmap, + .mmap = omap_gem_dmabuf_mmap, }; struct dma_buf *omap_gem_prime_export(struct drm_device *dev, diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index f035d2b..3eb097e 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -34,7 +34,7 @@ static void omap_irq_update(struct drm_device *dev) struct omap_drm_irq *irq; uint32_t irqmask = priv->vblank_mask; - BUG_ON(!spin_is_locked(&list_lock)); + assert_spin_locked(&list_lock); list_for_each_entry(irq, &priv->irq_list, node) irqmask |= irq->irqmask; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index ee8e2b3..1c6b63f 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -65,12 +65,16 @@ struct omap_plane { struct callback apply_done_cb; }; -static void unpin_worker(struct drm_flip_work *work, void *val) +static void omap_plane_unpin_worker(struct drm_flip_work *work, void *val) { struct omap_plane *omap_plane = container_of(work, struct omap_plane, unpin_work); struct drm_device *dev = omap_plane->base.dev; + /* + * omap_framebuffer_pin/unpin are always called from priv->wq, + * so there's no need for locking here. + */ omap_framebuffer_unpin(val); mutex_lock(&dev->mode_config.mutex); drm_framebuffer_unreference(val); @@ -78,7 +82,8 @@ static void unpin_worker(struct drm_flip_work *work, void *val) } /* update which fb (if any) is pinned for scanout */ -static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb) +static int omap_plane_update_pin(struct drm_plane *plane, + struct drm_framebuffer *fb) { struct omap_plane *omap_plane = to_omap_plane(plane); struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb; @@ -121,13 +126,12 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply) struct drm_crtc *crtc = plane->crtc; enum omap_channel channel; bool enabled = omap_plane->enabled && crtc; - bool ilace, replication; int ret; DBG("%s, enabled=%d", omap_plane->name, enabled); /* if fb has changed, pin new fb: */ - update_pin(plane, enabled ? 
plane->fb : NULL); + omap_plane_update_pin(plane, enabled ? plane->fb : NULL); if (!enabled) { dispc_ovl_enable(omap_plane->id, false); @@ -145,20 +149,17 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply) DBG("%d,%d %pad %pad", info->pos_x, info->pos_y, &info->paddr, &info->p_uv_addr); - /* TODO: */ - ilace = false; - replication = false; + dispc_ovl_set_channel_out(omap_plane->id, channel); /* and finally, update omapdss: */ - ret = dispc_ovl_setup(omap_plane->id, info, - replication, omap_crtc_timings(crtc), false); + ret = dispc_ovl_setup(omap_plane->id, info, false, + omap_crtc_timings(crtc), false); if (ret) { dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret); return; } dispc_ovl_enable(omap_plane->id, true); - dispc_ovl_set_channel_out(omap_plane->id, channel); } static void omap_plane_post_apply(struct omap_drm_apply *apply) @@ -167,7 +168,6 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply) container_of(apply, struct omap_plane, apply); struct drm_plane *plane = &omap_plane->base; struct omap_drm_private *priv = plane->dev->dev_private; - struct omap_overlay_info *info = &omap_plane->info; struct callback cb; cb = omap_plane->apply_done_cb; @@ -177,14 +177,9 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply) if (cb.fxn) cb.fxn(cb.arg); - - if (omap_plane->enabled) { - omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y, - info->out_width, info->out_height); - } } -static int apply(struct drm_plane *plane) +static int omap_plane_apply(struct drm_plane *plane) { if (plane->crtc) { struct omap_plane *omap_plane = to_omap_plane(plane); @@ -194,12 +189,12 @@ static int apply(struct drm_plane *plane) } int omap_plane_mode_set(struct drm_plane *plane, - struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - void (*fxn)(void *), void *arg) + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + unsigned int src_x, unsigned int src_y, + unsigned int src_w, unsigned int src_h, + void (*fxn)(void *), void *arg) { struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_drm_window *win = &omap_plane->win; @@ -209,11 +204,10 @@ int omap_plane_mode_set(struct drm_plane *plane, win->crtc_w = crtc_w; win->crtc_h = crtc_h; - /* src values are in Q16 fixed point, convert to integer: */ - win->src_x = src_x >> 16; - win->src_y = src_y >> 16; - win->src_w = src_w >> 16; - win->src_h = src_h >> 16; + win->src_x = src_x; + win->src_y = src_y; + win->src_w = src_w; + win->src_h = src_h; if (fxn) { /* omap_crtc should ensure that a new page flip @@ -225,15 +219,7 @@ int omap_plane_mode_set(struct drm_plane *plane, omap_plane->apply_done_cb.arg = arg; } - if (plane->fb) - drm_framebuffer_unreference(plane->fb); - - drm_framebuffer_reference(fb); - - plane->fb = fb; - plane->crtc = crtc; - - return apply(plane); + return omap_plane_apply(plane); } static int omap_plane_update(struct drm_plane *plane, @@ -254,17 +240,29 @@ static int omap_plane_update(struct drm_plane *plane, break; } + /* + * We don't need to take a reference to the framebuffer as the DRM core + * has already done so for the purpose of setting plane->fb. 
+ */ + plane->fb = fb; + plane->crtc = crtc; + + /* src values are in Q16 fixed point, convert to integer: */ return omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, - src_x, src_y, src_w, src_h, + src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16, NULL, NULL); } static int omap_plane_disable(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); + omap_plane->win.rotation = BIT(DRM_ROTATE_0); - return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF); + omap_plane->info.zorder = plane->type == DRM_PLANE_TYPE_PRIMARY + ? 0 : omap_plane->id; + + return omap_plane_set_enable(plane, false); } static void omap_plane_destroy(struct drm_plane *plane) @@ -275,7 +273,6 @@ static void omap_plane_destroy(struct drm_plane *plane) omap_irq_unregister(plane->dev, &omap_plane->error_irq); - omap_plane_disable(plane); drm_plane_cleanup(plane); drm_flip_work_cleanup(&omap_plane->unpin_work); @@ -283,18 +280,15 @@ static void omap_plane_destroy(struct drm_plane *plane) kfree(omap_plane); } -int omap_plane_dpms(struct drm_plane *plane, int mode) +int omap_plane_set_enable(struct drm_plane *plane, bool enable) { struct omap_plane *omap_plane = to_omap_plane(plane); - bool enabled = (mode == DRM_MODE_DPMS_ON); - int ret = 0; - if (enabled != omap_plane->enabled) { - omap_plane->enabled = enabled; - ret = apply(plane); - } + if (enable == omap_plane->enabled) + return 0; - return ret; + omap_plane->enabled = enable; + return omap_plane_apply(plane); } /* helper to install properties which are common to planes and crtcs */ @@ -342,61 +336,63 @@ int omap_plane_set_property(struct drm_plane *plane, if (property == priv->rotation_prop) { DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val); omap_plane->win.rotation = val; - ret = apply(plane); + ret = omap_plane_apply(plane); } else if (property == priv->zorder_prop) { DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val); omap_plane->info.zorder = val; - ret = apply(plane); + ret = omap_plane_apply(plane); } return ret; } static const struct drm_plane_funcs omap_plane_funcs = { - .update_plane = omap_plane_update, - .disable_plane = omap_plane_disable, - .destroy = omap_plane_destroy, - .set_property = omap_plane_set_property, + .update_plane = omap_plane_update, + .disable_plane = omap_plane_disable, + .destroy = omap_plane_destroy, + .set_property = omap_plane_set_property, }; static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) { struct omap_plane *omap_plane = container_of(irq, struct omap_plane, error_irq); - DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus); + DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name, + irqstatus); } static const char *plane_names[] = { - [OMAP_DSS_GFX] = "gfx", - [OMAP_DSS_VIDEO1] = "vid1", - [OMAP_DSS_VIDEO2] = "vid2", - [OMAP_DSS_VIDEO3] = "vid3", + [OMAP_DSS_GFX] = "gfx", + [OMAP_DSS_VIDEO1] = "vid1", + [OMAP_DSS_VIDEO2] = "vid2", + [OMAP_DSS_VIDEO3] = "vid3", }; static const uint32_t error_irqs[] = { - [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, + [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; /* initialize plane */ struct drm_plane *omap_plane_init(struct drm_device *dev, - int id, bool private_plane) + int id, 
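A note on the Q16 handling that moved into omap_plane_update() above: the DRM core passes plane source coordinates as 16.16 fixed point, so the integer pixel value lives in the upper 16 bits. A 1280-pixel source width, for example, arrives as 1280 << 16 = 0x05000000, and src_w >> 16 recovers 1280; the low 16 bits hold a fractional part this driver discards. Doing the shift in the one caller that receives fixed-point values lets omap_plane_mode_set() take plain integers everywhere else, matching its new unsigned int prototype.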
enum drm_plane_type type) { struct omap_drm_private *priv = dev->dev_private; - struct drm_plane *plane = NULL; + struct drm_plane *plane; struct omap_plane *omap_plane; struct omap_overlay_info *info; + int ret; - DBG("%s: priv=%d", plane_names[id], private_plane); + DBG("%s: type=%d", plane_names[id], type); omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); if (!omap_plane) - return NULL; + return ERR_PTR(-ENOMEM); drm_flip_work_init(&omap_plane->unpin_work, - "unpin", unpin_worker); + "unpin", omap_plane_unpin_worker); omap_plane->nformats = omap_framebuffer_get_formats( omap_plane->formats, ARRAY_SIZE(omap_plane->formats), @@ -413,8 +409,11 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, omap_plane->error_irq.irq = omap_plane_error_irq; omap_irq_register(dev, &omap_plane->error_irq); - drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs, - omap_plane->formats, omap_plane->nformats, private_plane); + ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, + &omap_plane_funcs, omap_plane->formats, + omap_plane->nformats, type); + if (ret < 0) + goto error; omap_plane_install_properties(plane, &plane->base); @@ -432,10 +431,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, * TODO add ioctl to give userspace an API to change this.. this * will come in a subsequent patch. */ - if (private_plane) + if (type == DRM_PLANE_TYPE_PRIMARY) omap_plane->info.zorder = 0; else omap_plane->info.zorder = id; return plane; + +error: + omap_irq_unregister(plane->dev, &omap_plane->error_irq); + kfree(omap_plane); + return NULL; } diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index d845837..6d64c7b 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -11,6 +11,7 @@ config DRM_PANEL_SIMPLE tristate "support for simple panels" depends on OF depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. 
Optionally a backlight can be attached so diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 39806c3..30904a9 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -33,9 +33,14 @@ #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> +#include <video/display_timing.h> +#include <video/videomode.h> + struct panel_desc { const struct drm_display_mode *modes; unsigned int num_modes; + const struct display_timing *timings; + unsigned int num_timings; unsigned int bpc; @@ -94,6 +99,25 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel) if (!panel->desc) return 0; + for (i = 0; i < panel->desc->num_timings; i++) { + const struct display_timing *dt = &panel->desc->timings[i]; + struct videomode vm; + + videomode_from_timing(dt, &vm); + mode = drm_mode_create(drm); + if (!mode) { + dev_err(drm->dev, "failed to add mode %ux%u\n", + dt->hactive.typ, dt->vactive.typ); + continue; + } + + drm_display_mode_from_videomode(&vm, mode); + drm_mode_set_name(mode); + + drm_mode_probed_add(connector, mode); + num++; + } + for (i = 0; i < panel->desc->num_modes; i++) { const struct drm_display_mode *m = &panel->desc->modes[i]; @@ -226,12 +250,30 @@ static int panel_simple_get_modes(struct drm_panel *panel) return num; } +static int panel_simple_get_timings(struct drm_panel *panel, + unsigned int num_timings, + struct display_timing *timings) +{ + struct panel_simple *p = to_panel_simple(panel); + unsigned int i; + + if (p->desc->num_timings < num_timings) + num_timings = p->desc->num_timings; + + if (timings) + for (i = 0; i < num_timings; i++) + timings[i] = p->desc->timings[i]; + + return p->desc->num_timings; +} + static const struct drm_panel_funcs panel_simple_funcs = { .disable = panel_simple_disable, .unprepare = panel_simple_unprepare, .prepare = panel_simple_prepare, .enable = panel_simple_enable, .get_modes = panel_simple_get_modes, + .get_timings = panel_simple_get_timings, }; static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) @@ -327,6 +369,31 @@ static void panel_simple_shutdown(struct device *dev) panel_simple_disable(&panel->base); } +static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = { + .clock = 33333, + .hdisplay = 800, + .hsync_start = 800 + 0, + .hsync_end = 800 + 0 + 255, + .htotal = 800 + 0 + 255 + 0, + .vdisplay = 480, + .vsync_start = 480 + 2, + .vsync_end = 480 + 2 + 45, + .vtotal = 480 + 2 + 45 + 0, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc ampire_am800480r3tmqwa1h = { + .modes = &ampire_am800480r3tmqwa1h_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, +}; + static const struct drm_display_mode auo_b101aw03_mode = { .clock = 51450, .hdisplay = 1024, @@ -350,6 +417,29 @@ static const struct panel_desc auo_b101aw03 = { }, }; +static const struct drm_display_mode auo_b101ean01_mode = { + .clock = 72500, + .hdisplay = 1280, + .hsync_start = 1280 + 119, + .hsync_end = 1280 + 119 + 32, + .htotal = 1280 + 119 + 32 + 21, + .vdisplay = 800, + .vsync_start = 800 + 4, + .vsync_end = 800 + 4 + 20, + .vtotal = 800 + 4 + 20 + 8, + .vrefresh = 60, +}; + +static const struct panel_desc auo_b101ean01 = { + .modes = &auo_b101ean01_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 217, + .height = 136, + }, +}; + static const struct drm_display_mode auo_b101xtn01_mode = { .clock = 72000, .hdisplay 
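As a quick sanity check on mode tables like these, the declared refresh rate should follow from the pixel clock (the .clock field is in kHz) and the totals: vrefresh is approximately clock / (htotal * vtotal). For the auo_b101ean01 mode above, htotal = 1280 + 119 + 32 + 21 = 1452 and vtotal = 800 + 4 + 20 + 8 = 832, so 72,500,000 Hz / (1452 * 832) gives about 60.0 Hz, consistent with .vrefresh = 60.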
= 1366, @@ -615,24 +705,25 @@ static const struct panel_desc giantplus_gpg482739qs5 = { .width = 95, .height = 54, }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; -static const struct drm_display_mode hannstar_hsd070pww1_mode = { - .clock = 71100, - .hdisplay = 1280, - .hsync_start = 1280 + 1, - .hsync_end = 1280 + 1 + 158, - .htotal = 1280 + 1 + 158 + 1, - .vdisplay = 800, - .vsync_start = 800 + 1, - .vsync_end = 800 + 1 + 21, - .vtotal = 800 + 1 + 21 + 1, - .vrefresh = 60, +static const struct display_timing hannstar_hsd070pww1_timing = { + .pixelclock = { 64300000, 71100000, 82000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 1, 1, 10 }, + .hback_porch = { 1, 1, 10 }, + .hsync_len = { 52, 158, 661 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 1, 1, 10 }, + .vback_porch = { 1, 1, 10 }, + .vsync_len = { 1, 21, 203 }, + .flags = DISPLAY_FLAGS_DE_HIGH, }; static const struct panel_desc hannstar_hsd070pww1 = { - .modes = &hannstar_hsd070pww1_mode, - .num_modes = 1, + .timings = &hannstar_hsd070pww1_timing, + .num_timings = 1, .bpc = 6, .size = { .width = 151, @@ -663,6 +754,31 @@ static const struct panel_desc hitachi_tx23d38vm0caa = { }, }; +static const struct drm_display_mode innolux_at043tn24_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 2, + .hsync_end = 480 + 2 + 41, + .htotal = 480 + 2 + 41 + 2, + .vdisplay = 272, + .vsync_start = 272 + 2, + .vsync_end = 272 + 2 + 11, + .vtotal = 272 + 2 + 11 + 2, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, +}; + +static const struct panel_desc innolux_at043tn24 = { + .modes = &innolux_at043tn24_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 95, + .height = 54, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, +}; + static const struct drm_display_mode innolux_g121i1_l01_mode = { .clock = 71000, .hdisplay = 1280, @@ -733,6 +849,29 @@ static const struct panel_desc innolux_n156bge_l21 = { }, }; +static const struct drm_display_mode innolux_zj070na_01p_mode = { + .clock = 51501, + .hdisplay = 1024, + .hsync_start = 1024 + 128, + .hsync_end = 1024 + 128 + 64, + .htotal = 1024 + 128 + 64 + 128, + .vdisplay = 600, + .vsync_start = 600 + 16, + .vsync_end = 600 + 16 + 4, + .vtotal = 600 + 16 + 4 + 16, + .vrefresh = 60, +}; + +static const struct panel_desc innolux_zj070na_01p = { + .modes = &innolux_zj070na_01p_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 1024, + .height = 600, + }, +}; + static const struct drm_display_mode lg_lp129qe_mode = { .clock = 285250, .hdisplay = 2560, @@ -756,6 +895,30 @@ static const struct panel_desc lg_lp129qe = { }, }; +static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { + .clock = 25000, + .hdisplay = 480, + .hsync_start = 480 + 10, + .hsync_end = 480 + 10 + 10, + .htotal = 480 + 10 + 10 + 15, + .vdisplay = 800, + .vsync_start = 800 + 3, + .vsync_end = 800 + 3 + 3, + .vtotal = 800 + 3 + 3 + 3, + .vrefresh = 60, +}; + +static const struct panel_desc ortustech_com43h4m85ulc = { + .modes = &ortustech_com43h4m85ulc_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 56, + .height = 93, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, +}; + static const struct drm_display_mode samsung_ltn101nt05_mode = { .clock = 54030, .hdisplay = 1024, @@ -779,11 +942,63 @@ static const struct panel_desc samsung_ltn101nt05 = { }, }; +static const struct drm_display_mode samsung_ltn140at29_301_mode = { + .clock = 76300, + .hdisplay = 1366, + .hsync_start = 1366 + 64, + .hsync_end = 1366 + 64 + 48, + .htotal = 1366 + 64 + 48 
+ 128, + .vdisplay = 768, + .vsync_start = 768 + 2, + .vsync_end = 768 + 2 + 5, + .vtotal = 768 + 2 + 5 + 17, + .vrefresh = 60, +}; + +static const struct panel_desc samsung_ltn140at29_301 = { + .modes = &samsung_ltn140at29_301_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 320, + .height = 187, + }, +}; + +static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = { + .clock = 33300, + .hdisplay = 800, + .hsync_start = 800 + 1, + .hsync_end = 800 + 1 + 64, + .htotal = 800 + 1 + 64 + 64, + .vdisplay = 480, + .vsync_start = 480 + 1, + .vsync_end = 480 + 1 + 23, + .vtotal = 480 + 1 + 23 + 22, + .vrefresh = 60, +}; + +static const struct panel_desc shelly_sca07010_bfn_lnn = { + .modes = &shelly_sca07010_bfn_lnn_mode, + .num_modes = 1, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, +}; + static const struct of_device_id platform_of_match[] = { { + .compatible = "ampire,am800480r3tmqwa1h", + .data = &ampire_am800480r3tmqwa1h, + }, { .compatible = "auo,b101aw03", .data = &auo_b101aw03, }, { + .compatible = "auo,b101ean01", + .data = &auo_b101ean01, + }, { .compatible = "auo,b101xtn01", .data = &auo_b101xtn01, }, { @@ -826,6 +1041,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "hit,tx23d38vm0caa", .data = &hitachi_tx23d38vm0caa }, { + .compatible = "innolux,at043tn24", + .data = &innolux_at043tn24, + }, { .compatible ="innolux,g121i1-l01", .data = &innolux_g121i1_l01 }, { @@ -835,12 +1053,24 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,n156bge-l21", .data = &innolux_n156bge_l21, }, { + .compatible = "innolux,zj070na-01p", + .data = &innolux_zj070na_01p, + }, { .compatible = "lg,lp129qe", .data = &lg_lp129qe, }, { + .compatible = "ortustech,com43h4m85ulc", + .data = &ortustech_com43h4m85ulc, + }, { .compatible = "samsung,ltn101nt05", .data = &samsung_ltn101nt05, }, { + .compatible = "samsung,ltn140at29-301", + .data = &samsung_ltn140at29_301, + }, { + .compatible = "shelly,sca07010-bfn-lnn", + .data = &shelly_sca07010_bfn_lnn, + }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 4605633..dea53e3 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -81,7 +81,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \ - radeon_sync.o radeon_audio.o + radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 86807ee..dac78ad 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; @@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) 
misc |= ATOM_INTERLACE; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) misc |= ATOM_DOUBLE_CLOCK_MODE; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; @@ -606,6 +610,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } + if (radeon_encoder->is_mst_encoder) { + struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv; + struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv; + + dp_clock = dig_connector->dp_clock; + } + /* use recommended ref_div for ss */ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (radeon_crtc->ss_enabled) { @@ -952,7 +963,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_ radeon_crtc->bpc = 8; radeon_crtc->ss_enabled = false; - if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || + if (radeon_encoder->is_mst_encoder) { + radeon_dp_mst_prepare_pll(crtc, mode); + } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = @@ -2069,6 +2082,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, radeon_crtc->connector = NULL; return false; } + if (radeon_crtc->encoder) { + struct radeon_encoder *radeon_encoder = + to_radeon_encoder(radeon_crtc->encoder); + + radeon_crtc->output_csc = radeon_encoder->output_csc; + } if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; if (!atombios_crtc_prepare_pll(crtc, adjusted_mode)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 8d74de8..3e3290c 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -158,7 +158,7 @@ done: #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) static ssize_t -radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct radeon_i2c_chan *chan = container_of(aux, struct radeon_i2c_chan, aux); @@ -226,11 +226,20 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; int ret; radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; - radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer; + if (ASIC_IS_DCE5(rdev)) { + if (radeon_auxch) + radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native; + else + radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; + } else { + radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; + } ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); if (!ret) @@ -301,8 +310,8 @@ static int dp_get_max_dp_pix_clock(int link_rate, /***** radeon specific DP functions *****/ -static int radeon_dp_get_max_link_rate(struct drm_connector *connector, - u8 dpcd[DP_DPCD_SIZE]) +int radeon_dp_get_max_link_rate(struct drm_connector *connector, + u8 dpcd[DP_DPCD_SIZE]) { int max_link_rate; diff --git 
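For context on the .transfer hook being switched in radeon_dp_aux_init() above: a drm_dp_aux implementation supplies one callback that performs a single AUX transaction and reports the sink's reply. A sketch of the contract (the body is driver-specific; this is not radeon's actual code):

    #include <drm/drm_dp_helper.h>

    static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
                                   struct drm_dp_aux_msg *msg)
    {
            /*
             * Execute the native or I2C-over-AUX request in msg->request
             * at msg->address, moving up to msg->size bytes through
             * msg->buffer. Store the sink's ack/nack status in msg->reply
             * and return the number of bytes transferred, or a negative
             * errno on failure.
             */
            return -EIO;    /* placeholder */
    }

    aux->transfer = my_aux_transfer;
    ret = drm_dp_aux_register(aux);

Because both backends share this one function-pointer slot, the radeon_auxch toggle can select the native DCE5+ implementation or the legacy ATOM-based one at init time without touching any callers.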
a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c39c1d0..f57c1ab 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -671,7 +671,15 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; + struct radeon_encoder_atom_dig *dig_enc; + if (radeon_encoder_is_digital(encoder)) { + dig_enc = radeon_encoder->enc_priv; + if (dig_enc->active_mst_links) + return ATOM_ENCODER_MODE_DP_MST; + } + if (radeon_encoder->is_mst_encoder || radeon_encoder->offset) + return ATOM_ENCODER_MODE_DP_MST; /* dp bridges are always DP */ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) return ATOM_ENCODER_MODE_DP; @@ -823,7 +831,7 @@ union dig_encoder_control { }; void -atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) +atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -920,7 +928,10 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000)) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; - args.v3.acConfig.ucDigSel = dig->dig_encoder; + if (enc_override != -1) + args.v3.acConfig.ucDigSel = enc_override; + else + args.v3.acConfig.ucDigSel = dig->dig_encoder; args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder); break; case 4: @@ -948,7 +959,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo else args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ; } - args.v4.acConfig.ucDigSel = dig->dig_encoder; + + if (enc_override != -1) + args.v4.acConfig.ucDigSel = enc_override; + else + args.v4.acConfig.ucDigSel = dig->dig_encoder; args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder); if (hpd_id == RADEON_HPD_NONE) args.v4.ucHPD_ID = 0; @@ -969,6 +984,12 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo } +void +atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) +{ + atombios_dig_encoder_setup2(encoder, action, panel_mode, -1); +} + union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; @@ -978,7 +999,7 @@ union dig_transmitter_control { }; void -atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) +atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -1328,7 +1349,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v5.asConfig.ucHPDSel = 0; else args.v5.asConfig.ucHPDSel = hpd_id + 1; - args.v5.ucDigEncoderSel = 1 << dig_encoder; + args.v5.ucDigEncoderSel = (fe != -1) ? 
(1 << fe) : (1 << dig_encoder); args.v5.ucDPLaneSet = lane_set; break; default: @@ -1344,6 +1365,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +void +atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) +{ + atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1); +} + bool atombios_set_edp_panel_power(struct drm_connector *connector, int action) { @@ -1687,6 +1714,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: + + /* don't power off encoders with active MST links */ + if (dig->active_mst_links) + return; + if (ASIC_IS_DCE4(rdev)) { if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1955,6 +1987,53 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } +void +atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); + uint8_t frev, crev; + union crtc_source_param args; + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + if (frev != 1 && crev != 2) + DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev); + + args.v2.ucCRTC = radeon_crtc->crtc_id; + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST; + + switch (fe) { + case 0: + args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; + break; + case 1: + args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; + break; + case 2: + args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; + break; + case 3: + args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; + break; + case 4: + args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; + break; + case 5: + args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; + break; + case 6: + args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID; + break; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + static void atombios_apply_encoder_quirks(struct drm_encoder *encoder, struct drm_display_mode *mode) @@ -2003,7 +2082,14 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, } } -static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) +void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx) +{ + if (enc_idx < 0) + return; + rdev->mode_info.active_encoders &= ~(1 << enc_idx); +} + +int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -2012,71 +2098,79 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct drm_encoder *test_encoder; struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; + int enc_idx = -1; + if (fe_idx >= 0) { + enc_idx = fe_idx; + goto assigned; + } if (ASIC_IS_DCE6(rdev)) { /* DCE6 */ switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) - return 1; + enc_idx = 1; else - return 0; + enc_idx = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) - 
return 3; + enc_idx = 3; else - return 2; + enc_idx = 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) - return 5; + enc_idx = 5; else - return 4; + enc_idx = 4; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - return 6; + enc_idx = 6; break; } + goto assigned; } else if (ASIC_IS_DCE4(rdev)) { /* DCE4/5 */ if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) - return 1; + enc_idx = 1; else - return 0; + enc_idx = 0; } else /* llano follows DCE3.2 */ - return radeon_crtc->crtc_id; + enc_idx = radeon_crtc->crtc_id; } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: if (dig->linkb) - return 1; + enc_idx = 1; else - return 0; + enc_idx = 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) - return 3; + enc_idx = 3; else - return 2; + enc_idx = 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) - return 5; + enc_idx = 5; else - return 4; + enc_idx = 4; break; } } + goto assigned; } /* on DCE32 an encoder can drive any block so just use crtc id */ if (ASIC_IS_DCE32(rdev)) { - return radeon_crtc->crtc_id; + enc_idx = radeon_crtc->crtc_id; + goto assigned; } /* on DCE3 - LVTMA can only be driven by DIGB */ @@ -2104,6 +2198,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) if (!(dig_enc_in_use & 1)) return 0; return 1; + +assigned: + if (enc_idx == -1) { + DRM_ERROR("Got incorrect encoder index - returning 0\n"); + return 0; + } + if (rdev->mode_info.active_encoders & (1 << enc_idx)) { + DRM_ERROR("chosen encoder in use %d\n", enc_idx); + } + rdev->mode_info.active_encoders |= (1 << enc_idx); + return enc_idx; } /* This only needs to be called once at startup */ @@ -2362,7 +2467,9 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; if (dig) { - dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); + if (dig->dig_encoder >= 0) + radeon_atom_release_dig_encoder(rdev, dig->dig_encoder); + dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1); if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) { if (rdev->family >= CHIP_R600) dig->afmt = rdev->mode_info.afmt[dig->dig_encoder]; @@ -2464,10 +2571,18 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) disable_done: if (radeon_encoder_is_digital(encoder)) { - dig = radeon_encoder->enc_priv; - dig->dig_encoder = -1; - } - radeon_encoder->active_device = 0; + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + if (rdev->asic->display.hdmi_enable) + radeon_hdmi_enable(rdev, encoder, false); + } + if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) { + dig = radeon_encoder->enc_priv; + radeon_atom_release_dig_encoder(rdev, dig->dig_encoder); + dig->dig_encoder = -1; + radeon_encoder->active_device = 0; + } + } else + radeon_encoder->active_device = 0; } /* these are handled by the primary encoders */ diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index db08f17..69556f5 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2751,13 +2751,54 @@ void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, else /* current_index == 2 */ pl = &ps->high; seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - if (rdev->family >= CHIP_CEDAR) { - seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: 
%u\n", - current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); - } else { - seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", - current_index, pl->sclk, pl->mclk, pl->vddc); - } + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } +} + +u32 btc_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->sclk; + } +} + +u32 btc_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->mclk; } } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index bcd2f1f..8730562 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5922,6 +5922,20 @@ void ci_dpm_print_power_state(struct radeon_device *rdev, r600_dpm_print_ps_status(rdev, rps); } +u32 ci_dpm_get_current_sclk(struct radeon_device *rdev) +{ + u32 sclk = ci_get_average_sclk_freq(rdev); + + return sclk; +} + +u32 ci_dpm_get_current_mclk(struct radeon_device *rdev) +{ + u32 mclk = ci_get_average_mclk_freq(rdev); + + return mclk; +} + u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct ci_power_info *pi = ci_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 3e670d3..28faea9 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -141,6 +141,39 @@ static void cik_fini_cg(struct radeon_device *rdev); static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, bool enable); +/** + * cik_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int cik_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS2: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case GRBM_STATUS_SE2: + case GRBM_STATUS_SE3: + case SRBM_STATUS: + case SRBM_STATUS2: + case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET): + case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET): + case UVD_STATUS: + /* TODO VCE */ + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + /* get temperature in millidegrees */ int ci_get_temp(struct radeon_device *rdev) { @@ -7394,12 +7427,12 @@ int cik_irq_set(struct radeon_device *rdev) (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & 
~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -7486,27 +7519,27 @@ int cik_irq_set(struct radeon_device *rdev) } if (rdev->irq.hpd[0]) { DRM_DEBUG("cik_irq_set: hpd 1\n"); - hpd1 |= DC_HPDx_INT_EN; + hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("cik_irq_set: hpd 2\n"); - hpd2 |= DC_HPDx_INT_EN; + hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[2]) { DRM_DEBUG("cik_irq_set: hpd 3\n"); - hpd3 |= DC_HPDx_INT_EN; + hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("cik_irq_set: hpd 4\n"); - hpd4 |= DC_HPDx_INT_EN; + hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("cik_irq_set: hpd 5\n"); - hpd5 |= DC_HPDx_INT_EN; + hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("cik_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; + hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); @@ -7678,6 +7711,36 @@ static inline void cik_irq_ack(struct radeon_device *rdev) tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } } /** @@ -7803,6 +7866,7 @@ int cik_irq_process(struct radeon_device *rdev) u8 me_id, pipe_id, queue_id; u32 ring_index; bool queue_hotplug = false; + bool queue_dp = false; bool queue_reset = false; u32 addr, status, mc_client; bool queue_thermal = false; @@ -8048,6 +8112,48 @@ restart_ih: DRM_DEBUG("IH: HPD6\n"); } break; + case 6: + if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + } + break;
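Note on the new src_data 6-11 cases above: they only latch the DP "short pulse" (HPD_RX) interrupt and set queue_dp; the actual servicing happens in process context via rdev->dp_work, whose worker is added elsewhere in this series (radeon_irq_kms.c) and is not visible in these hunks. A minimal sketch of its assumed shape - revisit every connector so the DP/MST code can read the sideband request that raised the pulse:

	static void radeon_dp_work_func(struct work_struct *work)
	{
		struct radeon_device *rdev = container_of(work, struct radeon_device,
							  dp_work);
		struct drm_device *dev = rdev->ddev;
		struct drm_mode_config *mode_config = &dev->mode_config;
		struct drm_connector *connector;

		/* radeon_connector_hotplug() routes DP connectors with an active
		 * MST topology into radeon_dp_mst_check_status(); see the
		 * radeon_connectors.c hunks later in this patch */
		list_for_each_entry(connector, &mode_config->connector_list, head)
			radeon_connector_hotplug(connector);
	}

	/* registered once at irq init time, next to the existing hotplug_work: */
	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);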
+ case 7: + if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + } + break; + case 8: + if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + } + break; + case 9: + if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + } + break; + case 10: + if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + } + break; + case 11: + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + } + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; @@ -8256,6 +8362,8 @@ restart_ih: rptr &= rdev->ih.ptr_mask; WREG32(IH_RB_RPTR, rptr); } + if (queue_dp) + schedule_work(&rdev->dp_work); if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_reset) { diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index c648e19..0089d83 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2088,6 +2088,8 @@ # define CLK_OD(x) ((x) << 6) # define CLK_OD_MASK (0x1f << 6) +#define UVD_STATUS 0xf6bc + /* UVD clocks */ #define CG_DCLK_CNTL 0xC050009C @@ -2129,6 +2131,7 @@ #define VCE_UENC_REG_CLOCK_GATING 0x207c0 #define VCE_SYS_INT_EN 0x21300 # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) +#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c #define VCE_LMI_CTRL2 0x21474 #define VCE_LMI_CTRL 0x21498 #define VCE_LMI_VM_CTRL 0x214a0 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 973df06..f848acf 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1006,6 +1006,34 @@ static void evergreen_init_golden_registers(struct radeon_device *rdev) } } +/** + * evergreen_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int evergreen_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case DMA_STATUS_REG: + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, unsigned *bankh, unsigned *mtaspect, unsigned *tile_split) @@ -4392,12 +4420,12 @@ int evergreen_irq_set(struct radeon_device *rdev) return 0; } - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
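The six per-pin hpdN read/mask statements here, like the matching enable and ack blocks, are near-identical copies, and the DC_HPD5/DC_HPD6 mix-up corrected in the ack paths above shows how easily that pattern goes wrong. A hypothetical table-driven alternative (not part of the patch), sketched for the evergreen enable path:

	static const u32 hpd_int_control_offsets[6] = {
		DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
		DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
	};

	static void evergreen_irq_set_hpd(struct radeon_device *rdev)
	{
		int i;

		for (i = 0; i < 6; i++) {
			/* clear both the plug (INT_EN) and short-pulse
			 * (RX_INT_EN) enables, then turn them back on for
			 * the pins that have an irq requested */
			u32 hpd = RREG32(hpd_int_control_offsets[i]) &
				~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);

			if (rdev->irq.hpd[i])
				hpd |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
			WREG32(hpd_int_control_offsets[i], hpd);
		}
	}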
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); if (rdev->family == CHIP_ARUBA) thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) & ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); @@ -4486,27 +4514,27 @@ int evergreen_irq_set(struct radeon_device *rdev) } if (rdev->irq.hpd[0]) { DRM_DEBUG("evergreen_irq_set: hpd 1\n"); - hpd1 |= DC_HPDx_INT_EN; + hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("evergreen_irq_set: hpd 2\n"); - hpd2 |= DC_HPDx_INT_EN; + hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[2]) { DRM_DEBUG("evergreen_irq_set: hpd 3\n"); - hpd3 |= DC_HPDx_INT_EN; + hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("evergreen_irq_set: hpd 4\n"); - hpd4 |= DC_HPDx_INT_EN; + hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("evergreen_irq_set: hpd 5\n"); - hpd5 |= DC_HPDx_INT_EN; + hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("evergreen_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; + hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.afmt[0]) { DRM_DEBUG("evergreen_irq_set: hdmi 0\n"); @@ -4700,6 +4728,38 @@ static void evergreen_irq_ack(struct radeon_device *rdev) tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; @@ -4780,6 +4840,7 @@ int evergreen_irq_process(struct radeon_device *rdev) u32 ring_index; bool queue_hotplug = false; bool queue_hdmi = false; + bool queue_dp = false; bool queue_thermal = false; u32 status, addr; @@ -5019,6 +5080,48 @@ restart_ih: DRM_DEBUG("IH: HPD6\n"); } break; + case 6: + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + } + break; + case 7: + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + } + break; + case 8: + if 
(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + } + break; + case 9: + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + } + break; + case 10: + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + } + break; + case 11: + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + } + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; @@ -5151,6 +5254,8 @@ restart_ih: rptr &= rdev->ih.ptr_mask; WREG32(IH_RB_RPTR, rptr); } + if (queue_dp) + schedule_work(&rdev->dp_work); if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_hdmi) diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a8d1d52..4aa5f75 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1520,6 +1520,7 @@ #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 +#define UVD_STATUS 0xf6bc /* * PM4 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 0e236d0..2d71da4 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2820,6 +2820,29 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, } } +u32 kv_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u32 current_index = + (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> + CURR_SCLK_INDEX_SHIFT; + u32 sclk; + + if (current_index >= SMU__NUM_SCLK_DPM_STATE) { + return 0; + } else { + sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); + return sclk; + } +} + +u32 kv_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + return pi->sys_info.bootup_uma_clk; +} + void kv_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *rps) { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index dab0081..e8a496f 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -828,6 +828,35 @@ out: return err; } +/** + * cayman_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int cayman_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): + case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + int tn_get_temp(struct radeon_device *rdev) { u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff; diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 7bc9f8d..c3d531a 100644 --- 
a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -4319,6 +4319,42 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, } } +u32 ni_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct ni_ps *ps = ni_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + return 0; + } else { + pl = &ps->performance_levels[current_index]; + return pl->sclk; + } +} + +u32 ni_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct ni_ps *ps = ni_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + return 0; + } else { + pl = &ps->performance_levels[current_index]; + return pl->mclk; + } +} + u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h index 5db7b7d..da310a70 100644 --- a/drivers/gpu/drm/radeon/ni_reg.h +++ b/drivers/gpu/drm/radeon/ni_reg.h @@ -83,4 +83,48 @@ # define NI_REGAMMA_PROG_B 4 # define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4) +#define NI_DP_MSE_LINK_TIMING 0x73a0 +# define NI_DP_MSE_LINK_FRAME(x) (((x) & 0x3ff) << 0) +# define NI_DP_MSE_LINK_LINE(x) (((x) & 0x3) << 16) + +#define NI_DP_MSE_MISC_CNTL 0x736c +# define NI_DP_MSE_BLANK_CODE(x) (((x) & 0x1) << 0) +# define NI_DP_MSE_TIMESTAMP_MODE(x) (((x) & 0x1) << 4) +# define NI_DP_MSE_ZERO_ENCODER(x) (((x) & 0x1) << 8) + +#define NI_DP_MSE_RATE_CNTL 0x7384 +# define NI_DP_MSE_RATE_Y(x) (((x) & 0x3ffffff) << 0) +# define NI_DP_MSE_RATE_X(x) (((x) & 0x3f) << 26) + +#define NI_DP_MSE_RATE_UPDATE 0x738c + +#define NI_DP_MSE_SAT0 0x7390 +# define NI_DP_MSE_SAT_SRC0(x) (((x) & 0x7) << 0) +# define NI_DP_MSE_SAT_SLOT_COUNT0(x) (((x) & 0x3f) << 8) +# define NI_DP_MSE_SAT_SRC1(x) (((x) & 0x7) << 16) +# define NI_DP_MSE_SAT_SLOT_COUNT1(x) (((x) & 0x3f) << 24) + +#define NI_DP_MSE_SAT1 0x7394 + +#define NI_DP_MSE_SAT2 0x7398 + +#define NI_DP_MSE_SAT_UPDATE 0x739c + +#define NI_DIG_BE_CNTL 0x7140 +# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8) +# define NI_DIG_FE_DIG_MODE(x) (((x) & 0x7) << 16) +# define NI_DIG_MODE_DP_SST 0 +# define NI_DIG_MODE_LVDS 1 +# define NI_DIG_MODE_TMDS_DVI 2 +# define NI_DIG_MODE_TMDS_HDMI 3 +# define NI_DIG_MODE_DP_MST 5 +# define NI_DIG_HPD_SELECT(x) (((x) & 0x7) << 28) + +#define NI_DIG_FE_CNTL 0x7000 +# define NI_DIG_SOURCE_SELECT(x) (((x) & 0x3) << 0) +# define NI_DIG_STEREOSYNC_SELECT(x) (((x) & 0x3) << 4) +# define NI_DIG_STEREOSYNC_GATE_EN(x) (((x) & 0x1) << 8) +# define NI_DIG_DUAL_LINK_ENABLE(x) (((x) & 0x1) << 16) +# define NI_DIG_SWAP(x) (((x) & 0x1) << 18) +# define NI_DIG_SYMCLK_FE_ON (0x1 << 24) #endif diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 6b44580..3b29083 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -816,6 +816,52 @@ #define MC_PMG_CMD_MRS2 0x2b5c #define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60 +#define AUX_CONTROL 0x6200 +#define AUX_EN (1 << 0) +#define AUX_LS_READ_EN (1 << 8) +#define 
AUX_LS_UPDATE_DISABLE(x) (((x) & 0x1) << 12) +#define AUX_HPD_DISCON(x) (((x) & 0x1) << 16) +#define AUX_DET_EN (1 << 18) +#define AUX_HPD_SEL(x) (((x) & 0x7) << 20) +#define AUX_IMPCAL_REQ_EN (1 << 24) +#define AUX_TEST_MODE (1 << 28) +#define AUX_DEGLITCH_EN (1 << 29) +#define AUX_SW_CONTROL 0x6204 +#define AUX_SW_GO (1 << 0) +#define AUX_LS_READ_TRIG (1 << 2) +#define AUX_SW_START_DELAY(x) (((x) & 0xf) << 4) +#define AUX_SW_WR_BYTES(x) (((x) & 0x1f) << 16) + +#define AUX_SW_INTERRUPT_CONTROL 0x620c +#define AUX_SW_DONE_INT (1 << 0) +#define AUX_SW_DONE_ACK (1 << 1) +#define AUX_SW_DONE_MASK (1 << 2) +#define AUX_SW_LS_DONE_INT (1 << 4) +#define AUX_SW_LS_DONE_MASK (1 << 6) +#define AUX_SW_STATUS 0x6210 +#define AUX_SW_DONE (1 << 0) +#define AUX_SW_REQ (1 << 1) +#define AUX_SW_RX_TIMEOUT_STATE(x) (((x) & 0x7) << 4) +#define AUX_SW_RX_TIMEOUT (1 << 7) +#define AUX_SW_RX_OVERFLOW (1 << 8) +#define AUX_SW_RX_HPD_DISCON (1 << 9) +#define AUX_SW_RX_PARTIAL_BYTE (1 << 10) +#define AUX_SW_NON_AUX_MODE (1 << 11) +#define AUX_SW_RX_MIN_COUNT_VIOL (1 << 12) +#define AUX_SW_RX_INVALID_STOP (1 << 14) +#define AUX_SW_RX_SYNC_INVALID_L (1 << 17) +#define AUX_SW_RX_SYNC_INVALID_H (1 << 18) +#define AUX_SW_RX_INVALID_START (1 << 19) +#define AUX_SW_RX_RECV_NO_DET (1 << 20) +#define AUX_SW_RX_RECV_INVALID_H (1 << 22) +#define AUX_SW_RX_RECV_INVALID_V (1 << 23) + +#define AUX_SW_DATA 0x6218 +#define AUX_SW_DATA_RW (1 << 0) +#define AUX_SW_DATA_MASK(x) (((x) & 0xff) << 8) +#define AUX_SW_DATA_INDEX(x) (((x) & 0x1f) << 16) +#define AUX_SW_AUTOINCREMENT_DISABLE (1 << 31) + #define LB_SYNC_RESET_SEL 0x6b28 #define LB_SYNC_RESET_SEL_MASK (3 << 0) #define LB_SYNC_RESET_SEL_SHIFT 0 @@ -1086,6 +1132,7 @@ #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 +#define UVD_STATUS 0xf6bc /* * PM4 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2fcad34..8f6d862 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -109,6 +109,32 @@ extern int evergreen_rlc_resume(struct radeon_device *rdev); extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); /** + * r600_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int r600_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS2: + case R_000E50_SRBM_STATUS: + case DMA_STATUS_REG: + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + +/** * r600_get_xclk - get the xclk * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5587603..73a6432 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -111,6 +111,8 @@ extern int radeon_deep_color; extern int radeon_use_pflipirq; extern int radeon_bapm; extern int radeon_backlight; +extern int radeon_auxch; +extern int radeon_mst; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -1565,6 +1567,7 @@ struct radeon_dpm { int new_active_crtc_count; u32 current_active_crtcs; int current_active_crtc_count; + bool single_display; struct radeon_dpm_dynamic_state dyn_state; struct radeon_dpm_fan fan; u32 tdp_limit; @@ -1856,6 +1859,8 @@ struct radeon_asic { u32 (*get_xclk)(struct radeon_device 
*rdev); /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev); + /* get register for info ioctl */ + int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val); /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); @@ -1984,6 +1989,8 @@ struct radeon_asic { u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev); int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed); int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed); + u32 (*get_current_sclk)(struct radeon_device *rdev); + u32 (*get_current_mclk)(struct radeon_device *rdev); } dpm; /* pageflipping */ struct { @@ -2407,6 +2414,7 @@ struct radeon_device { struct radeon_rlc rlc; struct radeon_mec mec; struct work_struct hotplug_work; + struct work_struct dp_work; struct work_struct audio_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ @@ -2931,6 +2939,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) #define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev)) +#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v)) #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) @@ -2949,6 +2958,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) +#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev)) +#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index c0ecd12..fafd8ce 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -136,6 +136,11 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) } } +static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + return -EINVAL; +} /* helper to disable agp */ /** @@ -199,6 +204,7 @@ static struct radeon_asic r100_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .get_page_entry = &r100_pci_gart_get_page_entry, @@ -266,6 +272,7 @@ static struct radeon_asic r200_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .get_page_entry = &r100_pci_gart_get_page_entry, @@ -361,6 +368,7 @@ static struct radeon_asic r300_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .get_page_entry = 
&r100_pci_gart_get_page_entry, @@ -428,6 +436,7 @@ static struct radeon_asic r300_asic_pcie = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .get_page_entry = &rv370_pcie_gart_get_page_entry, @@ -495,6 +504,7 @@ static struct radeon_asic r420_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .get_page_entry = &rv370_pcie_gart_get_page_entry, @@ -562,6 +572,7 @@ static struct radeon_asic rs400_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs400_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rs400_gart_tlb_flush, .get_page_entry = &rs400_gart_get_page_entry, @@ -629,6 +640,7 @@ static struct radeon_asic rs600_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs600_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rs600_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -696,6 +708,7 @@ static struct radeon_asic rs690_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs690_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rs400_gart_tlb_flush, .get_page_entry = &rs400_gart_get_page_entry, @@ -763,6 +776,7 @@ static struct radeon_asic rv515_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rv515_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .get_page_entry = &rv370_pcie_gart_get_page_entry, @@ -830,6 +844,7 @@ static struct radeon_asic r520_asic = { .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r520_mc_wait_for_idle, + .get_allowed_info_register = radeon_invalid_get_allowed_info_register, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .get_page_entry = &rv370_pcie_gart_get_page_entry, @@ -925,6 +940,7 @@ static struct radeon_asic r600_asic = { .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &r600_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = r600_get_allowed_info_register, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1009,6 +1025,7 @@ static struct radeon_asic rv6xx_asic = { .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &r600_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = r600_get_allowed_info_register, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1080,6 +1097,8 @@ static struct radeon_asic rv6xx_asic = { .print_power_state = &rv6xx_dpm_print_power_state, .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv6xx_dpm_force_performance_level, + .get_current_sclk = &rv6xx_dpm_get_current_sclk, + .get_current_mclk = &rv6xx_dpm_get_current_mclk, }, .pflip = { .page_flip = &rs600_page_flip, @@ -1099,6 +1118,7 @@ static struct radeon_asic rs780_asic = { .mc_wait_for_idle = &r600_mc_wait_for_idle, 
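Each asic table in this file gains a .get_allowed_info_register hook (the pre-r600 tables use the radeon_invalid_get_allowed_info_register stub above, which rejects everything). The consumer is not in these hunks; a minimal sketch of the expected dispatch site in the radeon info ioctl, assuming a RADEON_INFO_READ_REG query code and the usual value/value_ptr plumbing of radeon_kms.c:

	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_DEBUG_KMS("copy_from_user failed\n");
			return -EFAULT;
		}
		/* only registers on the per-asic whitelist are read back */
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;

Keeping the whitelist behind the function table gives each generation its own short switch instead of one cross-family register list.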
.get_xclk = &r600_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = r600_get_allowed_info_register, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1170,6 +1190,8 @@ static struct radeon_asic rs780_asic = { .print_power_state = &rs780_dpm_print_power_state, .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, .force_performance_level = &rs780_dpm_force_performance_level, + .get_current_sclk = &rs780_dpm_get_current_sclk, + .get_current_mclk = &rs780_dpm_get_current_mclk, }, .pflip = { .page_flip = &rs600_page_flip, @@ -1202,6 +1224,7 @@ static struct radeon_asic rv770_asic = { .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = r600_get_allowed_info_register, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1274,6 +1297,8 @@ static struct radeon_asic rv770_asic = { .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv770_dpm_force_performance_level, .vblank_too_short = &rv770_dpm_vblank_too_short, + .get_current_sclk = &rv770_dpm_get_current_sclk, + .get_current_mclk = &rv770_dpm_get_current_mclk, }, .pflip = { .page_flip = &rv770_page_flip, @@ -1319,6 +1344,7 @@ static struct radeon_asic evergreen_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = evergreen_get_allowed_info_register, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1391,6 +1417,8 @@ static struct radeon_asic evergreen_asic = { .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv770_dpm_force_performance_level, .vblank_too_short = &cypress_dpm_vblank_too_short, + .get_current_sclk = &rv770_dpm_get_current_sclk, + .get_current_mclk = &rv770_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -1410,6 +1438,7 @@ static struct radeon_asic sumo_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &r600_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = evergreen_get_allowed_info_register, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1481,6 +1510,8 @@ static struct radeon_asic sumo_asic = { .print_power_state = &sumo_dpm_print_power_state, .debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level, .force_performance_level = &sumo_dpm_force_performance_level, + .get_current_sclk = &sumo_dpm_get_current_sclk, + .get_current_mclk = &sumo_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -1500,6 +1531,7 @@ static struct radeon_asic btc_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = evergreen_get_allowed_info_register, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1572,6 +1604,8 @@ static struct radeon_asic btc_asic = { .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, .force_performance_level = 
&rv770_dpm_force_performance_level, .vblank_too_short = &btc_dpm_vblank_too_short, + .get_current_sclk = &btc_dpm_get_current_sclk, + .get_current_mclk = &btc_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -1634,6 +1668,7 @@ static struct radeon_asic cayman_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = cayman_get_allowed_info_register, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1717,6 +1752,8 @@ static struct radeon_asic cayman_asic = { .debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level, .force_performance_level = &ni_dpm_force_performance_level, .vblank_too_short = &ni_dpm_vblank_too_short, + .get_current_sclk = &ni_dpm_get_current_sclk, + .get_current_mclk = &ni_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -1736,6 +1773,7 @@ static struct radeon_asic trinity_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &r600_get_xclk, .get_gpu_clock_counter = &r600_get_gpu_clock_counter, + .get_allowed_info_register = cayman_get_allowed_info_register, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1819,6 +1857,8 @@ static struct radeon_asic trinity_asic = { .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, .force_performance_level = &trinity_dpm_force_performance_level, .enable_bapm = &trinity_dpm_enable_bapm, + .get_current_sclk = &trinity_dpm_get_current_sclk, + .get_current_mclk = &trinity_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -1868,6 +1908,7 @@ static struct radeon_asic si_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &si_get_xclk, .get_gpu_clock_counter = &si_get_gpu_clock_counter, + .get_allowed_info_register = si_get_allowed_info_register, .gart = { .tlb_flush = &si_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -1955,6 +1996,8 @@ static struct radeon_asic si_asic = { .fan_ctrl_get_mode = &si_fan_ctrl_get_mode, .get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent, .set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent, + .get_current_sclk = &si_dpm_get_current_sclk, + .get_current_mclk = &si_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -2032,6 +2075,7 @@ static struct radeon_asic ci_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &cik_get_xclk, .get_gpu_clock_counter = &cik_get_gpu_clock_counter, + .get_allowed_info_register = cik_get_allowed_info_register, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, @@ -2123,6 +2167,8 @@ static struct radeon_asic ci_asic = { .fan_ctrl_get_mode = &ci_fan_ctrl_get_mode, .get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent, .set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent, + .get_current_sclk = &ci_dpm_get_current_sclk, + .get_current_mclk = &ci_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, @@ -2142,6 +2188,7 @@ static struct radeon_asic kv_asic = { .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &cik_get_xclk, .get_gpu_clock_counter = &cik_get_gpu_clock_counter, + .get_allowed_info_register = cik_get_allowed_info_register, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, .get_page_entry = &rs600_gart_get_page_entry, 
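The .get_current_sclk/.get_current_mclk hooks being wired up in these tables give a uniform way to sample the live clocks from every dpm backend. Their consumer is also outside these hunks; a plausible sketch, assuming RADEON_INFO_CURRENT_GPU_SCLK/MCLK query codes and that the getters report clocks in the driver's usual 10 kHz units (hence /100 for MHz):

	case RADEON_INFO_CURRENT_GPU_SCLK:
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;

The dpm-disabled branch falls back to the bookkeeping the legacy power paths already keep in rdev->pm.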
@@ -2229,6 +2276,8 @@ static struct radeon_asic kv_asic = { .force_performance_level = &kv_dpm_force_performance_level, .powergate_uvd = &kv_dpm_powergate_uvd, .enable_bapm = &kv_dpm_enable_bapm, + .get_current_sclk = &kv_dpm_get_current_sclk, + .get_current_mclk = &kv_dpm_get_current_mclk, }, .pflip = { .page_flip = &evergreen_page_flip, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 72bdd3b..cf0a90b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -384,6 +384,8 @@ u32 r600_gfx_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring); void r600_gfx_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring); +int r600_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val); /* r600 irq */ int r600_irq_process(struct radeon_device *rdev); int r600_irq_init(struct radeon_device *rdev); @@ -433,6 +435,8 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde struct seq_file *m); int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev); +u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev); /* rs780 dpm */ int rs780_dpm_init(struct radeon_device *rdev); int rs780_dpm_enable(struct radeon_device *rdev); @@ -449,6 +453,8 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde struct seq_file *m); int rs780_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev); +u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -488,6 +494,8 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde int rv770_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool rv770_dpm_vblank_too_short(struct radeon_device *rdev); +u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev); +u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev); /* * evergreen @@ -540,6 +548,8 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, unsigned num_gpu_pages, struct reservation_object *resv); int evergreen_get_temp(struct radeon_device *rdev); +int evergreen_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val); int sumo_get_temp(struct radeon_device *rdev); int tn_get_temp(struct radeon_device *rdev); int cypress_dpm_init(struct radeon_device *rdev); @@ -563,6 +573,8 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); bool btc_dpm_vblank_too_short(struct radeon_device *rdev); void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); +u32 btc_dpm_get_current_sclk(struct radeon_device *rdev); +u32 btc_dpm_get_current_mclk(struct radeon_device *rdev); int sumo_dpm_init(struct radeon_device *rdev); int sumo_dpm_enable(struct radeon_device *rdev); int sumo_dpm_late_enable(struct radeon_device *rdev); @@ -581,6 +593,8 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev struct seq_file *m); int sumo_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev); +u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev); /* * cayman @@ -637,6 +651,8 @@ uint32_t cayman_dma_get_wptr(struct radeon_device *rdev, struct radeon_ring 
*ring); void cayman_dma_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring); +int cayman_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val); int ni_dpm_init(struct radeon_device *rdev); void ni_dpm_setup_asic(struct radeon_device *rdev); @@ -655,6 +671,8 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, int ni_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool ni_dpm_vblank_too_short(struct radeon_device *rdev); +u32 ni_dpm_get_current_sclk(struct radeon_device *rdev); +u32 ni_dpm_get_current_mclk(struct radeon_device *rdev); int trinity_dpm_init(struct radeon_device *rdev); int trinity_dpm_enable(struct radeon_device *rdev); int trinity_dpm_late_enable(struct radeon_device *rdev); @@ -674,6 +692,8 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r int trinity_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); +u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev); +u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev); /* DCE6 - SI */ void dce6_bandwidth_update(struct radeon_device *rdev); @@ -726,6 +746,8 @@ u32 si_get_xclk(struct radeon_device *rdev); uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int si_get_temp(struct radeon_device *rdev); +int si_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val); int si_dpm_init(struct radeon_device *rdev); void si_dpm_setup_asic(struct radeon_device *rdev); int si_dpm_enable(struct radeon_device *rdev); @@ -746,6 +768,8 @@ int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, u32 speed); u32 si_fan_ctrl_get_mode(struct radeon_device *rdev); void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode); +u32 si_dpm_get_current_sclk(struct radeon_device *rdev); +u32 si_dpm_get_current_mclk(struct radeon_device *rdev); /* DCE8 - CIK */ void dce8_bandwidth_update(struct radeon_device *rdev); @@ -841,6 +865,8 @@ void cik_sdma_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring); int ci_get_temp(struct radeon_device *rdev); int kv_get_temp(struct radeon_device *rdev); +int cik_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val); int ci_dpm_init(struct radeon_device *rdev); int ci_dpm_enable(struct radeon_device *rdev); @@ -862,6 +888,8 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool ci_dpm_vblank_too_short(struct radeon_device *rdev); void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); +u32 ci_dpm_get_current_sclk(struct radeon_device *rdev); +u32 ci_dpm_get_current_mclk(struct radeon_device *rdev); int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, u32 *speed); @@ -890,6 +918,8 @@ int kv_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); +u32 kv_dpm_get_current_sclk(struct radeon_device *rdev); +u32 kv_dpm_get_current_mclk(struct radeon_device *rdev); /* uvd v1.0 */ uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fc1b3f3..8f28524 100644 --- 
a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -845,6 +845,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) radeon_link_encoder_connector(dev); + radeon_setup_mst_connector(dev); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index b21ef69..48d49e6 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -520,16 +520,40 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; struct hdmi_avi_infoframe frame; int err; + list_for_each_entry(connector, + &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + radeon_connector = to_radeon_connector(connector); + break; + } + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return -ENOENT; + } + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %d\n", err); return err; } + if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) { + if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB) + frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; + else + frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; + } else { + frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + } + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { DRM_ERROR("failed to pack AVI infoframe: %d\n", err); diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8f..d27e4cc 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) static bool radeon_read_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios, val1, val2; size_t size; rdev->bios = NULL; @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + val1 = readb(&bios[0]); + val2 = readb(&bios[1]); + + if (size == 0 || val1 != 0x55 || val2 != 0xaa) { pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); + rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } + memcpy_fromio(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 27def67..7ffa7d5 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -27,6 +27,7 @@ #include <drm/drm_edid.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_dp_mst_helper.h> #include <drm/radeon_drm.h> #include "radeon.h" #include "radeon_audio.h" @@ -34,12 +35,33 @@ #include <linux/pm_runtime.h> +static int radeon_dp_handle_hpd(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int ret; + + ret = 
radeon_dp_mst_check_status(radeon_connector); + if (ret == -EINVAL) + return 1; + return 0; +} void radeon_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + if (radeon_connector->is_mst_connector) + return; + if (dig_connector->is_mst) { + radeon_dp_handle_hpd(connector); + return; + } + } /* bail if the connector does not have hpd pin, e.g., * VGA, TV, etc. */ @@ -725,6 +747,30 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_property_change_mode(&radeon_encoder->base); } + if (property == rdev->mode_info.output_csc_property) { + if (connector->encoder) + radeon_encoder = to_radeon_encoder(connector->encoder); + else { + struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); + } + + if (radeon_encoder->output_csc == val) + return 0; + + radeon_encoder->output_csc = val; + + if (connector->encoder->crtc) { + struct drm_crtc *crtc = connector->encoder->crtc; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + radeon_crtc->output_csc = radeon_encoder->output_csc; + + (*crtc_funcs->load_lut)(crtc); + } + } + return 0; } @@ -1585,6 +1631,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force) struct drm_encoder *encoder = radeon_best_single_encoder(connector); int r; + if (radeon_dig_connector->is_mst) + return connector_status_disconnected; + r = pm_runtime_get_sync(connector->dev->dev); if (r < 0) return connector_status_disconnected; @@ -1643,12 +1692,21 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { ret = connector_status_connected; - if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) + if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { radeon_dp_getdpcd(radeon_connector); + r = radeon_dp_mst_probe(radeon_connector); + if (r == 1) + ret = connector_status_disconnected; + } } else { if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - if (radeon_dp_getdpcd(radeon_connector)) - ret = connector_status_connected; + if (radeon_dp_getdpcd(radeon_connector)) { + r = radeon_dp_mst_probe(radeon_connector); + if (r == 1) + ret = connector_status_disconnected; + else + ret = connector_status_connected; + } } else { /* try non-aux ddc (DP to DVI/HDMI/etc. 
adapter) */ if (radeon_ddc_probe(radeon_connector, false)) @@ -1872,6 +1930,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1904,6 +1966,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; @@ -1950,6 +2016,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; @@ -1972,6 +2042,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; @@ -2023,6 +2097,10 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); } + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_DVII) connector->doublescan_allowed = true; @@ -2068,6 +2146,10 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) @@ -2116,6 +2198,10 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } + if (ASIC_IS_DCE5(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.output_csc_property, + RADEON_OUTPUT_CSC_BYPASS); connector->interlace_allowed = true; /* in theory with a DP to VGA converter... 
*/ connector->doublescan_allowed = false; @@ -2352,3 +2438,27 @@ radeon_add_legacy_connector(struct drm_device *dev, connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); } + +void radeon_setup_mst_connector(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + + if (!ASIC_IS_DCE5(rdev)) + return; + + if (radeon_mst == 0) + return; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + int ret; + + radeon_connector = to_radeon_connector(connector); + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + ret = radeon_dp_mst_init(radeon_connector); + } +} diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bd7519f..b7ca4c5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1442,6 +1442,11 @@ int radeon_device_init(struct radeon_device *rdev, DRM_ERROR("registering gem debugfs failed (%d).\n", r); } + r = radeon_mst_debugfs_init(rdev); + if (r) { + DRM_ERROR("registering mst debugfs failed (%d).\n", r); + } + if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { /* Acceleration not working on AGP card try again * with fallback to PCI or PCIE GART diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 913fafa..d2e9e9e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -154,7 +154,7 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc) (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, - (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | + (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) | NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); /* XXX match this to the depth of the crtc fmt block, move to modeset? */ WREG32(0x6940 + radeon_crtc->crtc_offset, 0); @@ -1382,6 +1382,13 @@ static struct drm_prop_enum_list radeon_dither_enum_list[] = { RADEON_FMT_DITHER_ENABLE, "on" }, }; +static struct drm_prop_enum_list radeon_output_csc_enum_list[] = +{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" }, + { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" }, + { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" }, + { RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" }, +}; + static int radeon_modeset_create_props(struct radeon_device *rdev) { int sz; @@ -1444,6 +1451,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) "dither", radeon_dither_enum_list, sz); + sz = ARRAY_SIZE(radeon_output_csc_enum_list); + rdev->mode_info.output_csc_property = + drm_property_create_enum(rdev->ddev, 0, + "output_csc", + radeon_output_csc_enum_list, sz); + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c new file mode 100644 index 0000000..bf1fecc --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -0,0 +1,206 @@ +/* + * Copyright 2015 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + */ +#include <drm/drmP.h> +#include <drm/radeon_drm.h> +#include "radeon.h" +#include "nid.h" + +#define AUX_RX_ERROR_FLAGS (AUX_SW_RX_OVERFLOW | \ + AUX_SW_RX_HPD_DISCON | \ + AUX_SW_RX_PARTIAL_BYTE | \ + AUX_SW_NON_AUX_MODE | \ + AUX_SW_RX_MIN_COUNT_VIOL | \ + AUX_SW_RX_INVALID_STOP | \ + AUX_SW_RX_SYNC_INVALID_L | \ + AUX_SW_RX_SYNC_INVALID_H | \ + AUX_SW_RX_INVALID_START | \ + AUX_SW_RX_RECV_NO_DET | \ + AUX_SW_RX_RECV_INVALID_H | \ + AUX_SW_RX_RECV_INVALID_V) + +#define AUX_SW_REPLY_GET_BYTE_COUNT(x) (((x) >> 24) & 0x1f) + +#define BARE_ADDRESS_SIZE 3 + +static const u32 aux_offset[] = +{ + 0x6200 - 0x6200, + 0x6250 - 0x6200, + 0x62a0 - 0x6200, + 0x6300 - 0x6200, + 0x6350 - 0x6200, + 0x63a0 - 0x6200, +}; + +ssize_t +radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct radeon_i2c_chan *chan = + container_of(aux, struct radeon_i2c_chan, aux); + struct drm_device *dev = chan->dev; + struct radeon_device *rdev = dev->dev_private; + int ret = 0, i; + uint32_t tmp, ack = 0; + int instance = chan->rec.i2c_id & 0xf; + u8 byte; + u8 *buf = msg->buffer; + int retry_count = 0; + int bytes; + int msize; + bool is_write = false; + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + is_write = true; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + break; + default: + return -EINVAL; + } + + /* work out two sizes required */ + msize = 0; + bytes = BARE_ADDRESS_SIZE; + if (msg->size) { + msize = msg->size - 1; + bytes++; + if (is_write) + bytes += msg->size; + } + + mutex_lock(&chan->mutex); + + /* switch the pad to aux mode */ + tmp = RREG32(chan->rec.mask_clk_reg); + tmp |= (1 << 16); + WREG32(chan->rec.mask_clk_reg, tmp); + + /* setup AUX control register with correct HPD pin */ + tmp = RREG32(AUX_CONTROL + aux_offset[instance]); + + tmp &= AUX_HPD_SEL(0x7); + tmp |= AUX_HPD_SEL(chan->rec.hpd); + tmp |= AUX_EN | AUX_LS_READ_EN; + + WREG32(AUX_CONTROL + aux_offset[instance], tmp); + + /* atombios appears to write this twice, let's copy it */ + WREG32(AUX_SW_CONTROL + aux_offset[instance], + AUX_SW_WR_BYTES(bytes)); + WREG32(AUX_SW_CONTROL + aux_offset[instance], + AUX_SW_WR_BYTES(bytes)); + + /* write the data header into the registers */ + /* request, address, msg size */ + byte = (msg->request << 4); + WREG32(AUX_SW_DATA + aux_offset[instance],
+ AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE); + + byte = (msg->address >> 8) & 0xff; + WREG32(AUX_SW_DATA + aux_offset[instance], + AUX_SW_DATA_MASK(byte)); + + byte = msg->address & 0xff; + WREG32(AUX_SW_DATA + aux_offset[instance], + AUX_SW_DATA_MASK(byte)); + + byte = msize; + WREG32(AUX_SW_DATA + aux_offset[instance], + AUX_SW_DATA_MASK(byte)); + + /* if we are writing - write the msg buffer */ + if (is_write) { + for (i = 0; i < msg->size; i++) { + WREG32(AUX_SW_DATA + aux_offset[instance], + AUX_SW_DATA_MASK(buf[i])); + } + } + + /* clear the ACK */ + WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK); + + /* write the size and GO bits */ + WREG32(AUX_SW_CONTROL + aux_offset[instance], + AUX_SW_WR_BYTES(bytes) | AUX_SW_GO); + + /* poll the status registers - TODO irq support */ + do { + tmp = RREG32(AUX_SW_STATUS + aux_offset[instance]); + if (tmp & AUX_SW_DONE) { + break; + } + usleep_range(100, 200); + } while (retry_count++ < 1000); + + if (retry_count >= 1000) { + DRM_ERROR("auxch hw never signalled completion, error %08x\n", tmp); + ret = -EIO; + goto done; + } + + if (tmp & AUX_SW_RX_TIMEOUT) { + DRM_DEBUG_KMS("dp_aux_ch timed out\n"); + ret = -ETIMEDOUT; + goto done; + } + if (tmp & AUX_RX_ERROR_FLAGS) { + DRM_DEBUG_KMS("dp_aux_ch flags not zero: %08x\n", tmp); + ret = -EIO; + goto done; + } + + bytes = AUX_SW_REPLY_GET_BYTE_COUNT(tmp); + if (bytes) { + WREG32(AUX_SW_DATA + aux_offset[instance], + AUX_SW_DATA_RW | AUX_SW_AUTOINCREMENT_DISABLE); + + tmp = RREG32(AUX_SW_DATA + aux_offset[instance]); + ack = (tmp >> 8) & 0xff; + + for (i = 0; i < bytes - 1; i++) { + tmp = RREG32(AUX_SW_DATA + aux_offset[instance]); + if (buf) + buf[i] = (tmp >> 8) & 0xff; + } + if (buf) + ret = bytes - 1; + } + + WREG32(AUX_SW_INTERRUPT_CONTROL + aux_offset[instance], AUX_SW_DONE_ACK); + + if (is_write) + ret = msg->size; +done: + mutex_unlock(&chan->mutex); + + if (ret >= 0) + msg->reply = ack >> 4; + return ret; +} diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c new file mode 100644 index 0000000..5952ff2 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -0,0 +1,782 @@ + +#include <drm/drmP.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_fb_helper.h> + +#include "radeon.h" +#include "atom.h" +#include "ni_reg.h" + +static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector); + +static int radeon_atom_set_enc_offset(int id) +{ + static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET, + EVERGREEN_CRTC1_REGISTER_OFFSET, + EVERGREEN_CRTC2_REGISTER_OFFSET, + EVERGREEN_CRTC3_REGISTER_OFFSET, + EVERGREEN_CRTC4_REGISTER_OFFSET, + EVERGREEN_CRTC5_REGISTER_OFFSET, + 0x13830 - 0x7030 }; + + return offsets[id]; +} + +static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary, + struct radeon_encoder_mst *mst_enc, + enum radeon_hpd_id hpd, bool enable) +{ + struct drm_device *dev = primary->base.dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t reg; + int retries = 0; + uint32_t temp; + + reg = RREG32(NI_DIG_BE_CNTL + primary->offset); + + /* set MST mode */ + reg &= ~NI_DIG_FE_DIG_MODE(7); + reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST); + + if (enable) + reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe); + else + reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe); + + reg |= NI_DIG_HPD_SELECT(hpd); + DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg); + WREG32(NI_DIG_BE_CNTL + primary->offset, reg); + + if 
(enable) { + uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe); + + do { + temp = RREG32(NI_DIG_FE_CNTL + offset); + } while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000); + if (retries == 10000) + DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe); + } + return 0; +} + +static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary, + int stream_number, + int fe, + int slots) +{ + struct drm_device *dev = primary->base.dev; + struct radeon_device *rdev = dev->dev_private; + u32 temp, val; + int retries = 0; + int satreg, satidx; + + satreg = stream_number >> 1; + satidx = stream_number & 1; + + temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset); + + val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe); + + val <<= (16 * satidx); + + temp &= ~(0xffff << (16 * satidx)); + + temp |= val; + + DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp); + WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp); + + WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1); + + do { + temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset); + } while ((temp & 0x1) && retries++ < 10000); + + if (retries == 10000) + DRM_ERROR("timed out waiting for SAT update %d\n", primary->offset); + + /* MTP 16 ? */ + return 0; +} + +static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn, + struct radeon_encoder *primary) +{ + struct drm_device *dev = mst_conn->base.dev; + struct stream_attribs new_attribs[6]; + int i; + int idx = 0; + struct radeon_connector *radeon_connector; + struct drm_connector *connector; + + memset(new_attribs, 0, sizeof(new_attribs)); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_encoder *subenc; + struct radeon_encoder_mst *mst_enc; + + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->is_mst_connector) + continue; + + if (radeon_connector->mst_port != mst_conn) + continue; + + subenc = radeon_connector->mst_encoder; + mst_enc = subenc->enc_priv; + + if (!mst_enc->enc_active) + continue; + + new_attribs[idx].fe = mst_enc->fe; + new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port); + idx++; + } + + for (i = 0; i < idx; i++) { + if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe || + new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) { + radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots); + mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe; + mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots; + } + } + + for (i = idx; i < mst_conn->enabled_attribs; i++) { + radeon_dp_mst_set_stream_attrib(primary, i, 0, 0); + mst_conn->cur_stream_attribs[i].fe = 0; + mst_conn->cur_stream_attribs[i].slots = 0; + } + mst_conn->enabled_attribs = idx; + return 0; +} + +static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y) +{ + struct drm_device *dev = mst->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder_mst *mst_enc = mst->enc_priv; + uint32_t val, temp; + uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe); + int retries = 0; + + val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y); + + WREG32(NI_DP_MSE_RATE_CNTL + offset, val); + + do { + temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset); + } while ((temp & 0x1) && (retries++ < 10000)); + + if (retries >= 10000) + DRM_ERROR("timed out waiting for rate cntl %d\n", mst_enc->fe); + return 0; +} + +static int
radeon_dp_mst_get_ddc_modes(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + struct edid *edid; + int ret = 0; + + edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port); + radeon_connector->edid = edid; + DRM_DEBUG_KMS("edid retrieved %p\n", edid); + if (radeon_connector->edid) { + drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); + ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); + drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); + return ret; + } + drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); + + return ret; +} + +static int radeon_dp_mst_get_modes(struct drm_connector *connector) +{ + return radeon_dp_mst_get_ddc_modes(connector); +} + +static enum drm_mode_status +radeon_dp_mst_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO - validate mode against available PBN for link */ + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + return &radeon_connector->mst_encoder->base; +} + +static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { + .get_modes = radeon_dp_mst_get_modes, + .mode_valid = radeon_dp_mst_mode_valid, + .best_encoder = radeon_mst_best_encoder, +}; + +static enum drm_connector_status +radeon_dp_mst_detect(struct drm_connector *connector, bool force) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + + return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port); +} + +static void +radeon_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder; + + drm_encoder_cleanup(&radeon_encoder->base); + kfree(radeon_encoder); + drm_connector_cleanup(connector); + kfree(radeon_connector); +} + +static void radeon_connector_dpms(struct drm_connector *connector, int mode) +{ + DRM_DEBUG_KMS("\n"); +} + +static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { + .dpms = radeon_connector_dpms, + .detect = radeon_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = radeon_dp_mst_connector_destroy, +}; + +static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + const char *pathprop) +{ + struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector; + struct drm_connector *connector; + + radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL); + if (!radeon_connector) + return NULL; + + radeon_connector->is_mst_connector = true; + connector = &radeon_connector->base; + radeon_connector->port = port; + radeon_connector->mst_port = master; + DRM_DEBUG_KMS("\n"); + + drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, 
DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs); + radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); + + drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_mode_connector_set_path_property(connector, pathprop); + drm_reinit_primary_mode_group(dev); + + mutex_lock(&dev->mode_config.mutex); + radeon_fb_add_connector(rdev, connector); + mutex_unlock(&dev->mode_config.mutex); + + drm_connector_register(connector); + return connector; +} + +static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct radeon_device *rdev = dev->dev_private; + + drm_connector_unregister(connector); + /* need to nuke the connector */ + mutex_lock(&dev->mode_config.mutex); + /* dpms off */ + radeon_fb_remove_connector(rdev, connector); + + drm_connector_cleanup(connector); + mutex_unlock(&dev->mode_config.mutex); + drm_reinit_primary_mode_group(dev); + + + kfree(connector); + DRM_DEBUG_KMS("\n"); +} + +static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + + drm_kms_helper_hotplug_event(dev); +} + +struct drm_dp_mst_topology_cbs mst_cbs = { + .add_connector = radeon_dp_add_mst_connector, + .destroy_connector = radeon_dp_destroy_mst_connector, + .hotplug = radeon_dp_mst_hotplug, +}; + +struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + if (!connector->encoder) + continue; + if (!radeon_connector->is_mst_connector) + continue; + + DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder); + if (connector->encoder == encoder) + return radeon_connector; + } + return NULL; +} + +void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder); + struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv; + struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base); + int dp_clock; + struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv; + + if (radeon_connector) { + radeon_connector->pixelclock_for_modeset = mode->clock; + if (radeon_connector->base.display_info.bpc) + radeon_crtc->bpc = radeon_connector->base.display_info.bpc; + else + radeon_crtc->bpc = 8; + } + + DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock); + dp_clock = dig_connector->dp_clock; + radeon_crtc->ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss, + ASIC_INTERNAL_SS_ON_DP, + dp_clock); +} + +static void +radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder, *primary; + struct radeon_encoder_mst *mst_enc; + struct 
radeon_encoder_atom_dig *dig_enc; + struct radeon_connector *radeon_connector; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + int ret, slots; + + if (!ASIC_IS_DCE5(rdev)) { + DRM_ERROR("got mst dpms on non-DCE5\n"); + return; + } + + radeon_connector = radeon_mst_find_connector(encoder); + if (!radeon_connector) + return; + + radeon_encoder = to_radeon_encoder(encoder); + + mst_enc = radeon_encoder->enc_priv; + + primary = mst_enc->primary; + + dig_enc = primary->enc_priv; + + crtc = encoder->crtc; + DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links); + + switch (mode) { + case DRM_MODE_DPMS_ON: + dig_enc->active_mst_links++; + + radeon_crtc = to_radeon_crtc(crtc); + + if (dig_enc->active_mst_links == 1) { + mst_enc->fe = dig_enc->dig_encoder; + mst_enc->fe_from_be = true; + atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe); + + atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE, + 0, 0, dig_enc->dig_encoder); + + if (radeon_dp_needs_link_train(mst_enc->connector) || + dig_enc->active_mst_links == 1) { + radeon_dp_link_train(&primary->base, &mst_enc->connector->base); + } + + } else { + mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id); + if (mst_enc->fe == -1) + DRM_ERROR("failed to get frontend for dig encoder\n"); + mst_enc->fe_from_be = false; + atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe); + } + + DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder, + dig_enc->linkb, radeon_crtc->crtc_id); + + ret = drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr, + radeon_connector->port, + mst_enc->pbn, &slots); + ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + + radeon_dp_mst_set_be_cntl(primary, mst_enc, + radeon_connector->mst_port->hpd.hpd, true); + + mst_enc->enc_active = true; + radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary); + radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0); + + atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0, + mst_enc->fe); + ret = drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); + + ret = drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr); + + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links); + + if (!mst_enc->enc_active) + return; + + drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port); + ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + + drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); + /* and this can also fail */ + drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port); + + mst_enc->enc_active = false; + radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary); + + radeon_dp_mst_set_be_cntl(primary, mst_enc, + radeon_connector->mst_port->hpd.hpd, false); + atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0, + mst_enc->fe); + + if (!mst_enc->fe_from_be) + radeon_atom_release_dig_encoder(rdev, mst_enc->fe); + + mst_enc->fe_from_be = false; + dig_enc->active_mst_links--; + if (dig_enc->active_mst_links == 0) { + /* drop link */ + } + + break; + } + +} + +static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + 
struct drm_display_mode *adjusted_mode) +{ + struct radeon_encoder_mst *mst_enc; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + int bpp = 24; + + mst_enc = radeon_encoder->enc_priv; + + mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); + + mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices; + DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", + mst_enc->primary->active_device, mst_enc->primary->devices, + mst_enc->connector->devices, mst_enc->primary->base.encoder_type); + + + drm_mode_set_crtcinfo(adjusted_mode, 0); + { + struct radeon_connector_atom_dig *dig_connector; + + dig_connector = mst_enc->connector->con_priv; + dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); + dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base, + dig_connector->dpcd); + DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, + dig_connector->dp_lane_count, dig_connector->dp_clock); + } + return true; +} + +static void radeon_mst_encoder_prepare(struct drm_encoder *encoder) +{ + struct radeon_connector *radeon_connector; + struct radeon_encoder *radeon_encoder, *primary; + struct radeon_encoder_mst *mst_enc; + struct radeon_encoder_atom_dig *dig_enc; + + radeon_connector = radeon_mst_find_connector(encoder); + if (!radeon_connector) { + DRM_DEBUG_KMS("failed to find connector %p\n", encoder); + return; + } + radeon_encoder = to_radeon_encoder(encoder); + + radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + mst_enc = radeon_encoder->enc_priv; + + primary = mst_enc->primary; + + dig_enc = primary->enc_priv; + + mst_enc->port = radeon_connector->port; + + if (dig_enc->dig_encoder == -1) { + dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1); + primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder); + atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder); + + + } + DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset); +} + +static void +radeon_mst_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + DRM_DEBUG_KMS("\n"); +} + +static void radeon_mst_encoder_commit(struct drm_encoder *encoder) +{ + radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + DRM_DEBUG_KMS("\n"); +} + +static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = { + .dpms = radeon_mst_encoder_dpms, + .mode_fixup = radeon_mst_mode_fixup, + .prepare = radeon_mst_encoder_prepare, + .mode_set = radeon_mst_encoder_mode_set, + .commit = radeon_mst_encoder_commit, +}; + +void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = { + .destroy = radeon_dp_mst_encoder_destroy, +}; + +static struct radeon_encoder * +radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder; + struct radeon_encoder_mst *mst_enc; + struct drm_encoder *encoder; + struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private; + struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base); + + DRM_DEBUG_KMS("enc master is %p\n", enc_master); + radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL); + if (!radeon_encoder) + return NULL; + + 
radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL); + if (!radeon_encoder->enc_priv) { + kfree(radeon_encoder); + return NULL; + } + encoder = &radeon_encoder->base; + switch (rdev->num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs, + DRM_MODE_ENCODER_DPMST); + drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs); + + mst_enc = radeon_encoder->enc_priv; + mst_enc->connector = connector; + mst_enc->primary = to_radeon_encoder(enc_master); + radeon_encoder->is_mst_encoder = true; + return radeon_encoder; +} + +int +radeon_dp_mst_init(struct radeon_connector *radeon_connector) +{ + struct drm_device *dev = radeon_connector->base.dev; + + if (!radeon_connector->ddc_bus->has_aux) + return 0; + + radeon_connector->mst_mgr.cbs = &mst_cbs; + return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev->dev, + &radeon_connector->ddc_bus->aux, 16, 6, + radeon_connector->base.base.id); +} + +int +radeon_dp_mst_probe(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + int ret; + u8 msg[1]; + + if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) + return 0; + + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg, + 1); + if (ret) { + if (msg[0] & DP_MST_CAP) { + DRM_DEBUG_KMS("Sink is MST capable\n"); + dig_connector->is_mst = true; + } else { + DRM_DEBUG_KMS("Sink is not MST capable\n"); + dig_connector->is_mst = false; + } + + } + drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr, + dig_connector->is_mst); + return dig_connector->is_mst; +} + +int +radeon_dp_mst_check_status(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + int retry; + + if (dig_connector->is_mst) { + u8 esi[16] = { 0 }; + int dret; + int ret = 0; + bool handled; + + dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, + DP_SINK_COUNT_ESI, esi, 8); +go_again: + if (dret == 8) { + DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]); + ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled); + + if (handled) { + for (retry = 0; retry < 3; retry++) { + int wret; + wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux, + DP_SINK_COUNT_ESI + 1, &esi[1], 3); + if (wret == 3) + break; + } + + dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, + DP_SINK_COUNT_ESI, esi, 8); + if (dret == 8) { + DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]); + goto go_again; + } + } else + ret = 0; + + return ret; + } else { + DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret); + dig_connector->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr, + dig_connector->is_mst); + /* send a hotplug event */ + } + } + return -EINVAL; +} + +#if defined(CONFIG_DEBUG_FS) + +static int radeon_debugfs_mst_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + int i; + + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if 
(connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + radeon_connector = to_radeon_connector(connector); + dig_connector = radeon_connector->con_priv; + if (radeon_connector->is_mst_connector) + continue; + if (!dig_connector->is_mst) + continue; + drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr); + + for (i = 0; i < radeon_connector->enabled_attribs; i++) + seq_printf(m, "attrib %d: %d %d\n", i, + radeon_connector->cur_stream_attribs[i].fe, + radeon_connector->cur_stream_attribs[i].slots); + } + drm_modeset_unlock_all(dev); + return 0; +} + +static struct drm_info_list radeon_debugfs_mst_list[] = { + {"radeon_mst_info", &radeon_debugfs_mst_info, 0, NULL}, +}; +#endif + +int radeon_mst_debugfs_init(struct radeon_device *rdev) +{ +#if defined(CONFIG_DEBUG_FS) + return radeon_debugfs_add_files(rdev, radeon_debugfs_mst_list, 1); +#endif + return 0; +} diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 5d684be..d688f6c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -190,6 +190,8 @@ int radeon_deep_color = 0; int radeon_use_pflipirq = 2; int radeon_bapm = -1; int radeon_backlight = -1; +int radeon_auxch = -1; +int radeon_mst = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -239,7 +241,7 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(msi, radeon_msi, int, 0444); -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)"); +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)"); module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444); MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)"); @@ -275,6 +277,12 @@ module_param_named(bapm, radeon_bapm, int, 0444); MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(backlight, radeon_backlight, int, 0444); +MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(auxch, radeon_auxch, int, 0444); + +MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)"); +module_param_named(mst, radeon_mst, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3a29703..ef99917 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -247,7 +247,16 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); - if (radeon_encoder->active_device & radeon_connector->devices) + if (radeon_encoder->is_mst_encoder) { + struct radeon_encoder_mst *mst_enc; + + if (!radeon_connector->is_mst_connector) + continue; + + mst_enc = radeon_encoder->enc_priv; + if (mst_enc->connector == radeon_connector->mst_port) + return connector; + } else if (radeon_encoder->active_device & radeon_connector->devices) return connector; } return NULL; @@ -393,6 +402,9 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: case 
DRM_MODE_CONNECTOR_DisplayPort: + if (radeon_connector->is_mst_connector) + return false; + dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index ea276ff..aeb6767 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; + info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { @@ -434,3 +435,13 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) return true; return false; } + +void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector) +{ + drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector); +} + +void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector) +{ + drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); +} diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 00fc597..7162c93 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -87,6 +87,20 @@ static void radeon_hotplug_work_func(struct work_struct *work) drm_helper_hpd_irq_event(dev); } +static void radeon_dp_work_func(struct work_struct *work) +{ + struct radeon_device *rdev = container_of(work, struct radeon_device, + dp_work); + struct drm_device *dev = rdev->ddev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + /* this should take a mutex */ + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); + } +} /** * radeon_driver_irq_preinstall_kms - drm irq preinstall callback * @@ -276,6 +290,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) } INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->dp_work, radeon_dp_work_func); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); rdev->irq.installed = true; diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 122eb56..3db2300 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -103,15 +103,14 @@ static const struct kgd2kfd_calls *kgd2kfd; bool radeon_kfd_init(void) { #if defined(CONFIG_HSA_AMD_MODULE) - bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, - const struct kgd2kfd_calls**); + bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); if (kgd2kfd_init_p == NULL) return false; - if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { + if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) { symbol_put(kgd2kfd_init); kgd2kfd = NULL; @@ -120,7 +119,7 @@ bool radeon_kfd_init(void) return true; #elif defined(CONFIG_HSA_AMD) - if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) { kgd2kfd = NULL; return false; @@ -143,7 +142,8 @@ void radeon_kfd_fini(void) void radeon_kfd_device_probe(struct radeon_device *rdev) { if (kgd2kfd) - rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev); + rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, + rdev->pdev, 
&kfd2kgd); } void radeon_kfd_device_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 686411e..7b2a733 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -547,6 +547,35 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file else *value = 1; break; + case RADEON_INFO_CURRENT_GPU_TEMP: + /* get temperature in millidegrees C */ + if (rdev->asic->pm.get_temperature) + *value = radeon_get_temperature(rdev); + else + *value = 0; + break; + case RADEON_INFO_CURRENT_GPU_SCLK: + /* get sclk in MHz */ + if (rdev->pm.dpm_enabled) + *value = radeon_dpm_get_current_sclk(rdev) / 100; + else + *value = rdev->pm.current_sclk / 100; + break; + case RADEON_INFO_CURRENT_GPU_MCLK: + /* get mclk in MHz */ + if (rdev->pm.dpm_enabled) + *value = radeon_dpm_get_current_mclk(rdev) / 100; + else + *value = rdev->pm.current_mclk / 100; + break; + case RADEON_INFO_READ_REG: + if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { + DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); + return -EFAULT; + } + if (radeon_get_allowed_info_register(rdev, *value, value)) + return -EINVAL; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index a69bd44..572b4db 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct radeon_bo *bo; - struct fence *fence; int r; bo = container_of(it, struct radeon_bo, mn_it); @@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, continue; } - fence = reservation_object_get_excl(bo->tbo.resv); - if (fence) { - r = radeon_fence_wait((struct radeon_fence *)fence, false); - if (r) - DRM_ERROR("(%d) failed to wait for user bo\n", r); - } + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, + false, MAX_SCHEDULE_TIMEOUT); + if (r <= 0) + DRM_ERROR("(%d) failed to wait for user bo\n", r); radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 920a8be..fa91a17 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -33,6 +33,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_dp_mst_helper.h> #include <drm/drm_fixed.h> #include <drm/drm_crtc_helper.h> #include <linux/i2c.h> @@ -85,6 +86,13 @@ enum radeon_hpd_id { RADEON_HPD_NONE = 0xff, }; +enum radeon_output_csc { + RADEON_OUTPUT_CSC_BYPASS = 0, + RADEON_OUTPUT_CSC_TVRGB = 1, + RADEON_OUTPUT_CSC_YCBCR601 = 2, + RADEON_OUTPUT_CSC_YCBCR709 = 3, +}; + #define RADEON_MAX_I2C_BUS 16 /* radeon gpio-based i2c @@ -255,6 +263,8 @@ struct radeon_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* Output CSC */ + struct drm_property *output_csc_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; @@ -265,6 +275,9 @@ struct radeon_mode_info { u16 firmware_flags; /* pointer to backlight encoder */ struct radeon_encoder *bl_encoder; + + /* bitmask for active encoder frontends */ +
uint32_t active_encoders; }; #define RADEON_MAX_BL_LEVEL 0xFF @@ -357,6 +370,7 @@ struct radeon_crtc { u32 wm_low; u32 wm_high; struct drm_display_mode hw_mode; + enum radeon_output_csc output_csc; }; struct radeon_encoder_primary_dac { @@ -426,12 +440,24 @@ struct radeon_encoder_atom_dig { uint8_t backlight_level; int panel_mode; struct radeon_afmt *afmt; + int active_mst_links; }; struct radeon_encoder_atom_dac { enum radeon_tv_std tv_std; }; +struct radeon_encoder_mst { + int crtc; + struct radeon_encoder *primary; + struct radeon_connector *connector; + struct drm_dp_mst_port *port; + int pbn; + int fe; + bool fe_from_be; + bool enc_active; +}; + struct radeon_encoder { struct drm_encoder base; uint32_t encoder_enum; @@ -450,6 +476,11 @@ struct radeon_encoder { bool is_ext_encoder; u16 caps; struct radeon_audio_funcs *audio; + enum radeon_output_csc output_csc; + bool can_mst; + uint32_t offset; + bool is_mst_encoder; + /* front end for this mst encoder */ }; struct radeon_connector_atom_dig { @@ -460,6 +491,7 @@ struct radeon_connector_atom_dig { int dp_clock; int dp_lane_count; bool edp_on; + bool is_mst; }; struct radeon_gpio_rec { @@ -503,6 +535,11 @@ enum radeon_connector_dither { RADEON_FMT_DITHER_ENABLE = 1, }; +struct stream_attribs { + uint16_t fe; + uint16_t slots; +}; + struct radeon_connector { struct drm_connector base; uint32_t connector_id; @@ -524,6 +561,14 @@ struct radeon_connector { enum radeon_connector_audio audio; enum radeon_connector_dither dither; int pixelclock_for_modeset; + bool is_mst_connector; + struct radeon_connector *mst_port; + struct drm_dp_mst_port *port; + struct drm_dp_mst_topology_mgr mst_mgr; + + struct radeon_encoder *mst_encoder; + struct stream_attribs cur_stream_attribs[6]; + int enabled_attribs; }; struct radeon_framebuffer { @@ -708,15 +753,26 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector); +int radeon_dp_get_max_link_rate(struct drm_connector *connector, + u8 *dpcd); extern void radeon_dp_set_rx_power_state(struct drm_connector *connector, u8 power_state); extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector); +extern ssize_t +radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg); + extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); +extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override); extern void radeon_atom_encoder_init(struct radeon_device *rdev); extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev); extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set); +extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder, + int action, uint8_t lane_num, + uint8_t lane_set, int fe); +extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, + int fe); extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); @@ -929,7 +985,23 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) void radeon_fb_output_poll_changed(struct radeon_device *rdev); void 
radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id); + +void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector); +void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector); + void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); + +/* mst */ +int radeon_dp_mst_init(struct radeon_connector *radeon_connector); +int radeon_dp_mst_probe(struct radeon_connector *radeon_connector); +int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector); +int radeon_mst_debugfs_init(struct radeon_device *rdev); +void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode); + +void radeon_setup_mst_connector(struct drm_device *dev); + +int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx); +void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx); #endif diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 33cf410..c1ba83a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) radeon_pm_compute_clocks(rdev); } -static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, - enum radeon_pm_state_type dpm_state) +static bool radeon_dpm_single_display(struct radeon_device *rdev) { - int i; - struct radeon_ps *ps; - u32 ui_class; bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? true : false; @@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) single_display = false; + return single_display; +} + +static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, + enum radeon_pm_state_type dpm_state) +{ + int i; + struct radeon_ps *ps; + u32 ui_class; + bool single_display = radeon_dpm_single_display(rdev); + /* certain older asics have a separate 3D performance state, * so try that first if the user selected performance */ @@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) struct radeon_ps *ps; enum radeon_pm_state_type dpm_state; int ret; + bool single_display = radeon_dpm_single_display(rdev); /* if dpm init failed */ if (!rdev->pm.dpm_enabled) @@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) /* vce just modifies an existing state so force a change */ if (ps->vce_active != rdev->pm.dpm.vce_active) goto force; + /* user has made a display change (such as timing) */ + if (rdev->pm.dpm.single_display != single_display) + goto force; if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { /* for pre-BTC and APUs if the num crtcs changed but state is the same, * all we need to do is update the display configuration.
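The per-ASIC *_dpm_get_current_sclk()/*_dpm_get_current_mclk() helpers added throughout this series report clocks in 10 kHz units, and radeon_kms.c divides by 100, so the new RADEON_INFO_CURRENT_GPU_SCLK and RADEON_INFO_CURRENT_GPU_MCLK requests return MHz. A minimal userspace sketch of a consumer, assuming libdrm's drmCommandWriteRead() and a radeon_drm.h that already carries the new request names; the device node path is illustrative:

/* Hedged sketch: query the current engine clock through the new
 * RADEON_INFO_CURRENT_GPU_SCLK request. The kernel writes the result
 * through the pointer passed in info.value. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>

int main(void)
{
	uint32_t sclk_mhz = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_CURRENT_GPU_SCLK,
		.value = (uintptr_t)&sclk_mhz,	/* out parameter */
	};
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative path */

	if (fd < 0)
		return 1;
	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return 1;
	printf("current sclk: %u MHz\n", sclk_mhz);
	return 0;
}

RADEON_INFO_CURRENT_GPU_TEMP follows the same pattern and reports millidegrees Celsius.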
@@ -1069,6 +1080,7 @@ force: rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; + rdev->pm.dpm.single_display = single_display; /* wait for the rings to drain */ for (i = 0; i < RADEON_NUM_RINGS; i++) { diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 2456f69..8c78723 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); seq_printf(m, "%u dwords in ring\n", count); - if (!ring->ready) + if (!ring->ring) return 0; /* print 8 dw before current rptr as often it's the last executed diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d02aa1d..b292aca 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + /* double check that we don't free the table twice */ + if (!ttm->sg->sgl) + return; + /* free the sg table and pages again */ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 9031f4b..cb0afe7 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -1001,6 +1001,28 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde ps->sclk_high, ps->max_voltage); } +/* get the current sclk in 10 khz units */ +u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev) +{ + u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK; + u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL); + u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1; + u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 + + ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1; + u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) / + (post_div * ref_div); + + return sclk; +} + +/* get the current mclk in 10 khz units */ +u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct igp_power_info *pi = rs780_get_pi(rdev); + + return pi->bootup_uma_clk; +} + int rs780_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level) { diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 6a5c233..97e5a6f 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -2050,6 +2050,52 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde } } +/* get the current sclk in 10 khz units */ +u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct rv6xx_ps *ps = rv6xx_get_ps(rps); + struct rv6xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->sclk; + } +} + +/* get the current mclk in 10 khz units */ +u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev) 
+{ + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct rv6xx_ps *ps = rv6xx_get_ps(rps); + struct rv6xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->mclk; + } +} + void rv6xx_dpm_fini(struct radeon_device *rdev) { int i; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 3067326..b9c7707 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2492,6 +2492,50 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde } } +u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->sclk; + } +} + +u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + return 0; + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + return pl->mclk; + } +} + void rv770_dpm_fini(struct radeon_device *rdev) { int i; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a7fb273..b1d74bc 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1264,6 +1264,36 @@ static void si_init_golden_registers(struct radeon_device *rdev) } } +/** + * si_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int si_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS2: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): + case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) @@ -6055,12 +6085,12 @@ int si_irq_set(struct radeon_device *rdev) (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); if (!ASIC_IS_NODCE(rdev)) { - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & 
~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); } dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -6123,27 +6153,27 @@ int si_irq_set(struct radeon_device *rdev) } if (rdev->irq.hpd[0]) { DRM_DEBUG("si_irq_set: hpd 1\n"); - hpd1 |= DC_HPDx_INT_EN; + hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("si_irq_set: hpd 2\n"); - hpd2 |= DC_HPDx_INT_EN; + hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[2]) { DRM_DEBUG("si_irq_set: hpd 3\n"); - hpd3 |= DC_HPDx_INT_EN; + hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("si_irq_set: hpd 4\n"); - hpd4 |= DC_HPDx_INT_EN; + hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("si_irq_set: hpd 5\n"); - hpd5 |= DC_HPDx_INT_EN; + hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("si_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; + hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); @@ -6306,6 +6336,37 @@ static inline void si_irq_ack(struct radeon_device *rdev) tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } } static void si_irq_disable(struct radeon_device *rdev) @@ -6371,6 +6432,7 @@ int si_irq_process(struct radeon_device *rdev) u32 src_id, src_data, ring_id; u32 ring_index; bool queue_hotplug = false; + bool queue_dp = false; bool queue_thermal = false; u32 status, addr; @@ -6611,6 +6673,48 @@ restart_ih: DRM_DEBUG("IH: HPD6\n"); } break; + case 6: + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + } + break; + case 7: + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + } + break; + case 8: + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + } + break; +
case 9: + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + } + break; + case 10: + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + } + break; + case 11: + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + } + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; @@ -6693,6 +6797,8 @@ restart_ih: rptr &= rdev->ih.ptr_mask; WREG32(IH_RB_RPTR, rptr); } + if (queue_dp) + schedule_work(&rdev->dp_work); if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_thermal && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 7be1165..b35bccf 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -6993,3 +6993,39 @@ void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } } + +u32 si_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct ni_ps *ps = ni_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + return 0; + } else { + pl = &ps->performance_levels[current_index]; + return pl->sclk; + } +} + +u32 si_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct ni_ps *ps = ni_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + return 0; + } else { + pl = &ps->performance_levels[current_index]; + return pl->mclk; + } +} diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 99a9835..3afac30 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1556,6 +1556,7 @@ #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 +#define UVD_STATUS 0xf6bc #define UVD_CGC_CTRL 0xF4B0 # define DCM (1 << 0) diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 25fd4ce..cd08628 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1837,6 +1837,34 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev } } +u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct sumo_power_info *pi = sumo_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; + struct sumo_ps *ps = sumo_get_ps(rps); + struct sumo_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >> + CURR_INDEX_SHIFT; + + if (current_index == BOOST_DPM_LEVEL) { + pl = &pi->boost_pl; + return pl->sclk; + } else if (current_index >= ps->num_levels) { + return 0; + } else { + pl = 
&ps->levels[current_index]; + return pl->sclk; + } +} + +u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct sumo_power_info *pi = sumo_get_pi(rdev); + + return pi->sys_info.bootup_uma_clk; +} + void sumo_dpm_fini(struct radeon_device *rdev) { int i; diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 38dacb7..a5b02c5 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1964,6 +1964,31 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r } } +u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev) +{ + struct trinity_power_info *pi = trinity_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; + struct trinity_ps *ps = trinity_get_ps(rps); + struct trinity_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >> + CURRENT_STATE_SHIFT; + + if (current_index >= ps->num_levels) { + return 0; + } else { + pl = &ps->levels[current_index]; + return pl->sclk; + } +} + +u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev) +{ + struct trinity_power_info *pi = trinity_get_pi(rdev); + + return pi->sys_info.bootup_uma_clk; +} + void trinity_dpm_fini(struct radeon_device *rdev) { int i; diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c index 1ac7bb8..fbbe78f 100644 --- a/drivers/gpu/drm/radeon/vce_v2_0.c +++ b/drivers/gpu/drm/radeon/vce_v2_0.c @@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev) WREG32(VCE_LMI_SWAP_CNTL1, 0); WREG32(VCE_LMI_VM_CTRL, 0); + WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8); + + addr &= 0xff; size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); WREG32(VCE_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 9e72133..7d0b8ef 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -486,8 +486,6 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc) unsigned long flags; if (event) { - event->pipe = rcrtc->index; - WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 1d9e4f8..da1216a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -252,7 +252,8 @@ static const struct file_operations rcar_du_fops = { }; static struct drm_driver rcar_du_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME + | DRIVER_ATOMIC, .load = rcar_du_load, .unload = rcar_du_unload, .preclose = rcar_du_preclose, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 35a2f04..210e5c3 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -316,6 +316,9 @@ rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane) static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + if (state->fb) + drm_framebuffer_unreference(state->fb); + kfree(to_rcar_du_plane_state(state)); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 21a481b..30da781 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ 
-129,6 +129,7 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) struct rockchip_drm_private *private; struct dma_iommu_mapping *mapping; struct device *dev = drm_dev->dev; + struct drm_connector *connector; int ret; private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); @@ -171,6 +172,23 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_detach_device; + /* + * All components are now added, we can publish the connector sysfs + * entries to userspace. This will generate hotplug events and so + * userspace will expect to be able to access DRM at this point. + */ + list_for_each_entry(connector, &drm_dev->mode_config.connector_list, + head) { + ret = drm_connector_register(connector); + if (ret) { + dev_err(drm_dev->dev, + "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", + connector->base.id, + connector->name, ret); + goto err_unbind; + } + } + /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -200,6 +218,7 @@ err_vblank_cleanup: drm_vblank_cleanup(drm_dev); err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); +err_unbind: component_unbind_all(dev, drm_dev); err_detach_device: arm_iommu_detach_device(dev); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index a5d889a..5b0dc0f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -71,7 +71,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; - rk_obj = rockchip_gem_create_object(dev, size); + rk_obj = rockchip_gem_create_object(dev, size, true); if (IS_ERR(rk_obj)) return -ENOMEM; @@ -106,7 +106,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fb = helper->fb; drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); - drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); offset = fbi->var.xoffset * bytes_per_pixel; offset += fbi->var.yoffset * fb->pitches[0]; @@ -119,6 +119,9 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", fb->width, fb->height, fb->depth, rk_obj->kvaddr, offset, size); + + fbi->skip_vt_switch = true; + return 0; err_drm_framebuffer_unref: diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 7ca8799e..eb2282c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -22,7 +22,8 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" -static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj) +static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, + bool alloc_kmap) { struct drm_gem_object *obj = &rk_obj->base; struct drm_device *drm = obj->dev; @@ -30,7 +31,9 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj) init_dma_attrs(&rk_obj->dma_attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); - /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */ + if (!alloc_kmap) + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs); + rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, &rk_obj->dma_addr, GFP_KERNEL, &rk_obj->dma_attrs); @@ -103,7 +106,8 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) } struct 
rockchip_gem_object * - rockchip_gem_create_object(struct drm_device *drm, unsigned int size) + rockchip_gem_create_object(struct drm_device *drm, unsigned int size, + bool alloc_kmap) { struct rockchip_gem_object *rk_obj; struct drm_gem_object *obj;
@@ -119,7 +123,7 @@ struct rockchip_gem_object * drm_gem_private_object_init(drm, obj, size); - ret = rockchip_gem_alloc_buf(rk_obj); + ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); if (ret) goto err_free_rk_obj;
@@ -163,7 +167,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv, struct drm_gem_object *obj; int ret; - rk_obj = rockchip_gem_create_object(drm, size); + rk_obj = rockchip_gem_create_object(drm, size, false); if (IS_ERR(rk_obj)) return ERR_CAST(rk_obj);
@@ -282,6 +286,9 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs)) + return NULL; + return rk_obj->kvaddr; }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 67bcebe..ad22618 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -41,7 +41,8 @@ int rockchip_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma); struct rockchip_gem_object * - rockchip_gem_create_object(struct drm_device *drm, unsigned int size); + rockchip_gem_create_object(struct drm_device *drm, unsigned int size, + bool alloc_kmap); void rockchip_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 9a5c571..ccb0ce0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -81,7 +81,7 @@ struct vop { struct drm_crtc crtc; struct device *dev; struct drm_device *drm_dev; - unsigned int dpms; + bool is_enabled; int connector_type; int connector_out_mode;
@@ -89,6 +89,7 @@ struct vop { /* mutex vsync_ work */ struct mutex vsync_mutex; bool vsync_work_pending; + struct completion dsp_hold_completion; const struct vop_data *data;
@@ -382,11 +383,50 @@ static bool is_alpha_support(uint32_t format) } } +static void vop_dsp_hold_valid_irq_enable(struct vop *vop) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK, + DSP_HOLD_VALID_INTR_EN(1)); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + +static void vop_dsp_hold_valid_irq_disable(struct vop *vop) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK, + DSP_HOLD_VALID_INTR_EN(0)); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + static void vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); int ret; + if (vop->is_enabled) + return; + + ret = pm_runtime_get_sync(vop->dev); + if (ret < 0) { + dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); + return; + } + ret = clk_enable(vop->hclk); if (ret < 0) { dev_err(vop->dev, "failed to enable hclk - %d\n", ret); @@ -417,6 +457,11 @@ static void vop_enable(struct drm_crtc *crtc) goto err_disable_aclk; } + /* + * At this point the VOP clocks and the iommu are enabled, so reading
+ * and writing VOP registers is safe.
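+ * is_enabled is set to true only after this point, so the dsp-hold irq
+ * helpers and the vblank hooks can rely on it.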
+ */ + vop->is_enabled = true; + spin_lock(&vop->reg_lock); VOP_CTRL_SET(vop, standby, 0);
@@ -441,28 +486,44 @@ static void vop_disable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); - drm_vblank_off(crtc->dev, vop->pipe); + if (!vop->is_enabled) + return; - disable_irq(vop->irq); + drm_vblank_off(crtc->dev, vop->pipe); /* - * TODO: Since standby doesn't take effect until the next vblank, - * when we turn off dclk below, the vop is probably still active. + * VOP standby takes effect at the end of the current frame; the
+ * "dsp hold valid" irq fires once standby has completed.
+ *
+ * We must wait for standby to complete before disabling aclk,
+ * otherwise the memory bus may hang.
*/ + reinit_completion(&vop->dsp_hold_completion); + vop_dsp_hold_valid_irq_enable(vop); + spin_lock(&vop->reg_lock); VOP_CTRL_SET(vop, standby, 1); spin_unlock(&vop->reg_lock); + + wait_for_completion(&vop->dsp_hold_completion); + + vop_dsp_hold_valid_irq_disable(vop); + + disable_irq(vop->irq); + + vop->is_enabled = false; + /* - * disable dclk to stop frame scan, so we can safely detach iommu, + * VOP standby is complete, so detaching the iommu is safe. */ - clk_disable(vop->dclk); - rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); + clk_disable(vop->dclk); clk_disable(vop->aclk); clk_disable(vop->hclk); + pm_runtime_put(vop->dev); }
/* @@ -742,7 +803,7 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); unsigned long flags; - if (vop->dpms != DRM_MODE_DPMS_ON) + if (!vop->is_enabled) return -EPERM; spin_lock_irqsave(&vop->irq_lock, flags);
@@ -759,8 +820,9 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); unsigned long flags; - if (vop->dpms != DRM_MODE_DPMS_ON) + if (!vop->is_enabled) return; + spin_lock_irqsave(&vop->irq_lock, flags); vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0)); spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -773,15 +835,8 @@ static const struct rockchip_crtc_funcs private_crtc_funcs = { static void vop_crtc_dpms(struct drm_crtc *crtc, int mode) { - struct vop *vop = to_vop(crtc); - DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); - if (vop->dpms == mode) { - DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); - return; - } - switch (mode) { case DRM_MODE_DPMS_ON: vop_enable(crtc);
@@ -795,8 +850,6 @@ static void vop_crtc_dpms(struct drm_crtc *crtc, int mode) DRM_DEBUG_KMS("unspecified mode %d\n", mode); break; } - - vop->dpms = mode; } static void vop_crtc_prepare(struct drm_crtc *crtc)
@@ -847,7 +900,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc, u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; u16 vact_end = vact_st + vdisplay; - int ret; + int ret, ret_clk; uint32_t val; /*
@@ -869,13 +922,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc, default: DRM_ERROR("unsupport connector_type[%d]\n", vop->connector_type); - return -EINVAL; + ret = -EINVAL; + goto out; }; VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode); val = 0x8; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0; + val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; + val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
0 : (1 << 1); VOP_CTRL_SET(vop, pin_pol, val); VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); @@ -892,7 +946,7 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc, ret = vop_crtc_mode_set_base(crtc, x, y, fb); if (ret) - return ret; + goto out; /* * reset dclk, take all mode config affect, so the clk would run in @@ -903,13 +957,14 @@ static int vop_crtc_mode_set(struct drm_crtc *crtc, reset_control_deassert(vop->dclk_rst); clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); - ret = clk_enable(vop->dclk); - if (ret < 0) { - dev_err(vop->dev, "failed to enable dclk - %d\n", ret); - return ret; +out: + ret_clk = clk_enable(vop->dclk); + if (ret_clk < 0) { + dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk); + return ret_clk; } - return 0; + return ret; } static void vop_crtc_commit(struct drm_crtc *crtc) @@ -934,9 +989,9 @@ static int vop_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *old_fb = crtc->primary->fb; int ret; - /* when the page flip is requested, crtc's dpms should be on */ - if (vop->dpms > DRM_MODE_DPMS_ON) { - DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms); + /* when the page flip is requested, crtc should be on */ + if (!vop->is_enabled) { + DRM_DEBUG("page flip request rejected because crtc is off.\n"); return 0; } @@ -1081,6 +1136,7 @@ static irqreturn_t vop_isr(int irq, void *data) struct vop *vop = data; uint32_t intr0_reg, active_irqs; unsigned long flags; + int ret = IRQ_NONE; /* * INTR_CTRL0 register has interrupt status, enable and clear bits, we @@ -1099,15 +1155,23 @@ static irqreturn_t vop_isr(int irq, void *data) if (!active_irqs) return IRQ_NONE; - /* Only Frame Start Interrupt is enabled; other irqs are spurious. */ - if (!(active_irqs & FS_INTR)) { - DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs); - return IRQ_NONE; + if (active_irqs & DSP_HOLD_VALID_INTR) { + complete(&vop->dsp_hold_completion); + active_irqs &= ~DSP_HOLD_VALID_INTR; + ret = IRQ_HANDLED; + } + + if (active_irqs & FS_INTR) { + drm_handle_vblank(vop->drm_dev, vop->pipe); + active_irqs &= ~FS_INTR; + ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED; } - drm_handle_vblank(vop->drm_dev, vop->pipe); + /* Unhandled irqs are spurious. */ + if (active_irqs) + DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs); - return (vop->vsync_work_pending) ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; + return ret; } static int vop_create_crtc(struct vop *vop) @@ -1189,6 +1253,7 @@ static int vop_create_crtc(struct vop *vop) goto err_cleanup_crtc; } + init_completion(&vop->dsp_hold_completion); crtc->port = port; vop->pipe = drm_crtc_index(crtc); rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe); @@ -1302,7 +1367,7 @@ static int vop_initial(struct vop *vop) clk_disable(vop->hclk); - vop->dpms = DRM_MODE_DPMS_OFF; + vop->is_enabled = false; return 0; diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c index e6f6ef7..6b641c5 100644 --- a/drivers/gpu/drm/sti/sti_drm_crtc.c +++ b/drivers/gpu/drm/sti/sti_drm_crtc.c @@ -9,6 +9,8 @@ #include <linux/clk.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_plane_helper.h> @@ -77,22 +79,18 @@ static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, } static int -sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb) +sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - struct sti_layer *layer; struct clk *clk; int rate = mode->clock * 1000; int res; - unsigned int w, h; - DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n", + DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer), - crtc->primary->fb->base.id, mode->base.id, mode->name); + mode->base.id, mode->name); DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", mode->vrefresh, mode->clock, @@ -122,72 +120,13 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ? 
compo->vtg_main : compo->vtg_aux, &crtc->mode); - /* a GDP is reserved to the CRTC FB */ - layer = to_sti_layer(crtc->primary); - if (!layer) { - DRM_ERROR("Can not find GDP0)\n"); - return -EINVAL; - } - - /* copy the mode data adjusted by mode_fixup() into crtc->mode - * so that hardware can be set to proper mode - */ - memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); - - res = sti_mixer_set_layer_depth(mixer, layer); - if (res) { - DRM_ERROR("Can not set layer depth\n"); - return -EINVAL; - } res = sti_mixer_active_video_area(mixer, &crtc->mode); if (res) { DRM_ERROR("Can not set active video area\n"); return -EINVAL; } - w = crtc->primary->fb->width - x; - h = crtc->primary->fb->height - y; - - return sti_layer_prepare(layer, crtc, - crtc->primary->fb, &crtc->mode, - mixer->id, 0, 0, w, h, x, y, w, h); -} - -static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct sti_mixer *mixer = to_sti_mixer(crtc); - struct sti_layer *layer; - unsigned int w, h; - int ret; - - DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n", - crtc->base.id, sti_mixer_to_str(mixer), - crtc->primary->fb->base.id, x, y); - - /* GDP is reserved to the CRTC FB */ - layer = to_sti_layer(crtc->primary); - if (!layer) { - DRM_ERROR("Can not find GDP0)\n"); - ret = -EINVAL; - goto out; - } - - w = crtc->primary->fb->width - crtc->x; - h = crtc->primary->fb->height - crtc->y; - - ret = sti_layer_prepare(layer, crtc, - crtc->primary->fb, &crtc->mode, - mixer->id, 0, 0, w, h, - crtc->x, crtc->y, w, h); - if (ret) { - DRM_ERROR("Can not prepare layer\n"); - goto out; - } - - sti_drm_crtc_commit(crtc); -out: - return ret; + return res; } static void sti_drm_crtc_disable(struct drm_crtc *crtc) @@ -195,7 +134,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - struct sti_layer *layer; if (!mixer->enabled) return; @@ -205,24 +143,6 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) /* Disable Background */ sti_mixer_set_background_status(mixer, false); - /* Disable GDP */ - layer = to_sti_layer(crtc->primary); - if (!layer) { - DRM_ERROR("Cannot find GDP0\n"); - return; - } - - /* Disable layer at mixer level */ - if (sti_mixer_set_layer_status(mixer, layer, false)) - DRM_ERROR("Can not disable %s layer at mixer\n", - sti_layer_to_str(layer)); - - /* Wait a while to be sure that a Vsync event is received */ - msleep(WAIT_NEXT_VSYNC_MS); - - /* Then disable layer itself */ - sti_layer_disable(layer); - drm_crtc_vblank_off(crtc); /* Disable pixel clock and compo IP clocks */ @@ -237,64 +157,44 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc) mixer->enabled = false; } -static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { - .dpms = sti_drm_crtc_dpms, - .prepare = sti_drm_crtc_prepare, - .commit = sti_drm_crtc_commit, - .mode_fixup = sti_drm_crtc_mode_fixup, - .mode_set = sti_drm_crtc_mode_set, - .mode_set_base = sti_drm_crtc_mode_set_base, - .disable = sti_drm_crtc_disable, -}; +static void +sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + sti_drm_crtc_prepare(crtc); + sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); +} -static int sti_drm_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static void sti_drm_atomic_begin(struct drm_crtc *crtc) { - struct drm_device *drm_dev = crtc->dev; - struct 
drm_framebuffer *old_fb; struct sti_mixer *mixer = to_sti_mixer(crtc); - unsigned long flags; - int ret; - DRM_DEBUG_KMS("fb %d --> fb %d\n", - crtc->primary->fb->base.id, fb->base.id); + if (crtc->state->event) { + crtc->state->event->pipe = drm_crtc_index(crtc); - mutex_lock(&drm_dev->struct_mutex); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); - old_fb = crtc->primary->fb; - crtc->primary->fb = fb; - ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb); - if (ret) { - DRM_ERROR("failed\n"); - crtc->primary->fb = old_fb; - goto out; + mixer->pending_event = crtc->state->event; + crtc->state->event = NULL; } +} - if (event) { - event->pipe = mixer->id; - - ret = drm_vblank_get(drm_dev, event->pipe); - if (ret) { - DRM_ERROR("Cannot get vblank\n"); - goto out; - } - - spin_lock_irqsave(&drm_dev->event_lock, flags); - if (mixer->pending_event) { - drm_vblank_put(drm_dev, event->pipe); - ret = -EBUSY; - } else { - mixer->pending_event = event; - } - spin_unlock_irqrestore(&drm_dev->event_lock, flags); - } -out: - mutex_unlock(&drm_dev->struct_mutex); - return ret; +static void sti_drm_atomic_flush(struct drm_crtc *crtc) +{ } +static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { + .dpms = sti_drm_crtc_dpms, + .prepare = sti_drm_crtc_prepare, + .commit = sti_drm_crtc_commit, + .mode_fixup = sti_drm_crtc_mode_fixup, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_nofb = sti_drm_crtc_mode_set_nofb, + .mode_set_base = drm_helper_crtc_mode_set_base, + .disable = sti_drm_crtc_disable, + .atomic_begin = sti_drm_atomic_begin, + .atomic_flush = sti_drm_atomic_flush, +}; + static void sti_drm_crtc_destroy(struct drm_crtc *crtc) { DRM_DEBUG_KMS("\n"); @@ -380,10 +280,13 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); static struct drm_crtc_funcs sti_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, - .page_flip = sti_drm_crtc_page_flip, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, .destroy = sti_drm_crtc_destroy, .set_property = sti_drm_crtc_set_property, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; bool sti_drm_crtc_is_main(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c index 5239fa1..59d558b 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.c +++ b/drivers/gpu/drm/sti/sti_drm_drv.c @@ -12,6 +12,8 @@ #include <linux/module.h> #include <linux/of_platform.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -28,8 +30,87 @@ #define STI_MAX_FB_HEIGHT 4096 #define STI_MAX_FB_WIDTH 4096 +static void sti_drm_atomic_schedule(struct sti_drm_private *private, + struct drm_atomic_state *state) +{ + private->commit.state = state; + schedule_work(&private->commit.work); +} + +static void sti_drm_atomic_complete(struct sti_drm_private *private, + struct drm_atomic_state *state) +{ + struct drm_device *drm = private->drm_dev; + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or 
completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + drm_atomic_helper_commit_modeset_disables(drm, state); + drm_atomic_helper_commit_planes(drm, state); + drm_atomic_helper_commit_modeset_enables(drm, state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); + drm_atomic_state_free(state); +} + +static void sti_drm_atomic_work(struct work_struct *work) +{ + struct sti_drm_private *private = container_of(work, + struct sti_drm_private, commit.work); + + sti_drm_atomic_complete(private, private->commit.state); +} + +static int sti_drm_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *state, bool async) +{ + struct sti_drm_private *private = drm->dev_private; + int err; + + err = drm_atomic_helper_prepare_planes(drm, state); + if (err) + return err; + + /* serialize outstanding asynchronous commits */ + mutex_lock(&private->commit.lock); + flush_work(&private->commit.work); + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. + */ + + drm_atomic_helper_swap_state(drm, state); + + if (async) + sti_drm_atomic_schedule(private, state); + else + sti_drm_atomic_complete(private, state); + + mutex_unlock(&private->commit.lock); + return 0; +} + static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { .fb_create = drm_fb_cma_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = sti_drm_atomic_commit, }; static void sti_drm_mode_config_init(struct drm_device *dev) @@ -61,6 +142,9 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)private; private->drm_dev = dev; + mutex_init(&private->commit.lock); + INIT_WORK(&private->commit.work, sti_drm_atomic_work); + drm_mode_config_init(dev); drm_kms_helper_poll_init(dev); @@ -74,7 +158,7 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags) return ret; } - drm_helper_disable_unused_functions(dev); + drm_mode_config_reset(dev); #ifdef CONFIG_DRM_STI_FBDEV drm_fbdev_cma_init(dev, 32, diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h index ec5e2eb..c413aa3 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.h +++ b/drivers/gpu/drm/sti/sti_drm_drv.h @@ -24,6 +24,12 @@ struct sti_drm_private { struct sti_compositor *compo; struct drm_property *plane_zorder_property; struct drm_device *drm_dev; + + struct { + struct drm_atomic_state *state; + struct work_struct work; + struct mutex lock; + } commit; }; #endif diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c index bb6a293..64d4ed4 100644 --- a/drivers/gpu/drm/sti/sti_drm_plane.c +++ b/drivers/gpu/drm/sti/sti_drm_plane.c @@ -6,6 +6,10 @@ * License terms: GNU General Public License (GPL), version 2 */ +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + #include "sti_compositor.h" #include "sti_drm_drv.h" #include "sti_drm_plane.h" @@ -33,9 +37,9 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct sti_mixer *mixer = to_sti_mixer(crtc); int 
res; - DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n", + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer), - plane->base.id, sti_layer_to_str(layer), fb->base.id); + plane->base.id, sti_layer_to_str(layer)); DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y); res = sti_mixer_set_layer_depth(mixer, layer); @@ -110,7 +114,7 @@ static void sti_drm_plane_destroy(struct drm_plane *plane) { DRM_DEBUG_DRIVER("\n"); - sti_drm_disable_plane(plane); + drm_plane_helper_disable(plane); drm_plane_cleanup(plane); } @@ -133,10 +137,58 @@ static int sti_drm_plane_set_property(struct drm_plane *plane, } static struct drm_plane_funcs sti_drm_plane_funcs = { - .update_plane = sti_drm_update_plane, - .disable_plane = sti_drm_disable_plane, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = sti_drm_plane_destroy, .set_property = sti_drm_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int sti_drm_plane_prepare_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *new_state) +{ + return 0; +} + +static void sti_drm_plane_cleanup_fb(struct drm_plane *plane, + struct drm_framebuffer *fb, + const struct drm_plane_state *old_fb) +{ +} + +static int sti_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + return 0; +} + +static void sti_drm_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *oldstate) +{ + struct drm_plane_state *state = plane->state; + + sti_drm_update_plane(plane, state->crtc, state->fb, + state->crtc_x, state->crtc_y, + state->crtc_w, state->crtc_h, + state->src_x, state->src_y, + state->src_w, state->src_h); +} + +static void sti_drm_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *oldstate) +{ + sti_drm_disable_plane(plane); +} + +static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = { + .prepare_fb = sti_drm_plane_prepare_fb, + .cleanup_fb = sti_drm_plane_cleanup_fb, + .atomic_check = sti_drm_plane_atomic_check, + .atomic_update = sti_drm_plane_atomic_update, + .atomic_disable = sti_drm_plane_atomic_disable, }; static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane, @@ -178,11 +230,13 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev, return NULL; } + drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs); + for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++) if (sti_layer_default_zorder[i] == layer->desc) break; - default_zorder = i; + default_zorder = i + 1; if (type == DRM_PLANE_TYPE_OVERLAY) sti_drm_plane_attach_zorder_property(&layer->plane, diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index aeb5070..a9b678a 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_panel.h> @@ -364,10 +365,13 @@ static void sti_dvo_connector_destroy(struct drm_connector *connector) } static struct drm_connector_funcs sti_dvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_dvo_connector_detect, .destroy = 
sti_dvo_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index a9bbb08..598cd78 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -10,6 +10,7 @@ #include <linux/platform_device.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> /* HDformatter registers */
@@ -611,10 +612,13 @@ static void sti_hda_connector_destroy(struct drm_connector *connector) } static struct drm_connector_funcs sti_hda_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hda_connector_detect, .destroy = sti_hda_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 1485ade..ae5424b 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -13,6 +13,7 @@ #include <linux/reset.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h>
@@ -663,10 +664,13 @@ static void sti_hdmi_connector_destroy(struct drm_connector *connector) } static struct drm_connector_funcs sti_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hdmi_connector_detect, .destroy = sti_hdmi_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index b7f7815..a287e4f 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -425,8 +425,8 @@ static void tegra_plane_reset(struct drm_plane *plane) { struct tegra_plane_state *state; - if (plane->state && plane->state->fb) - drm_framebuffer_unreference(plane->state->fb); + if (plane->state) + __drm_atomic_helper_plane_destroy_state(plane, plane->state); kfree(plane->state); plane->state = NULL;
@@ -443,12 +443,14 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla struct tegra_plane_state *state = to_tegra_plane_state(plane->state); struct tegra_plane_state *copy; - copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + copy = kmalloc(sizeof(*copy), GFP_KERNEL); if (!copy) return NULL; - if (copy->base.fb) - drm_framebuffer_reference(copy->base.fb); + __drm_atomic_helper_plane_duplicate_state(plane, &copy->base); + copy->tiling = state->tiling; + copy->format = state->format; + copy->swap = state->swap; return &copy->base; }
@@ -456,9 +458,7 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla static void tegra_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { - if (state->fb) - 
drm_framebuffer_unreference(state->fb); - + __drm_atomic_helper_plane_destroy_state(plane, state); kfree(state); }
@@ -908,6 +908,15 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc) return 0; } +u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc) +{ + if (dc->syncpt) + return host1x_syncpt_read(dc->syncpt); + + /* fallback to software emulated VBLANK counter */ + return drm_crtc_vblank_count(&dc->base); +} + void tegra_dc_enable_vblank(struct tegra_dc *dc) { unsigned long value, flags;
@@ -995,6 +1004,9 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) { struct tegra_dc_state *state; + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state); + kfree(crtc->state); crtc->state = NULL;
@@ -1011,14 +1023,15 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) struct tegra_dc_state *state = to_dc_state(crtc->state); struct tegra_dc_state *copy; - copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + copy = kmalloc(sizeof(*copy), GFP_KERNEL); if (!copy) return NULL; - copy->base.mode_changed = false; - copy->base.active_changed = false; - copy->base.planes_changed = false; - copy->base.event = NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base); + copy->clk = state->clk; + copy->pclk = state->pclk; + copy->div = state->div; + copy->planes = state->planes; return &copy->base; }
@@ -1026,6 +1039,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { + __drm_atomic_helper_crtc_destroy_state(crtc, state); kfree(state); }
@@ -1152,26 +1166,18 @@ static int tegra_dc_set_timings(struct tegra_dc *dc, return 0; } -int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, - unsigned long pclk, unsigned int div) -{ - u32 value; - int err; - - err = clk_set_parent(dc->clk, parent); - if (err < 0) { - dev_err(dc->dev, "failed to set parent clock: %d\n", err); - return err; - } - - DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), div); - - value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - - return 0; -} - +/** + * tegra_dc_state_setup_clock - check clock settings and store them in atomic + * state + * @dc: display controller + * @crtc_state: CRTC atomic state + * @clk: parent clock for display controller + * @pclk: pixel clock + * @div: shift clock divider + * + * Returns: + * 0 on success or a negative error-code on failure. 
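+ *
+ * An illustrative call from a mode-set path (a sketch, not code from
+ * this patch; "parent" stands for whichever parent clock the caller
+ * has picked):
+ *
+ * err = tegra_dc_state_setup_clock(dc, crtc_state, parent, pclk, div);
+ * if (err < 0)
+ * return err; /* "parent" cannot feed dc->clk */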
+ */ int tegra_dc_state_setup_clock(struct tegra_dc *dc, struct drm_crtc_state *crtc_state, struct clk *clk, unsigned long pclk, @@ -1179,6 +1185,9 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc, { struct tegra_dc_state *state = to_dc_state(crtc_state); + if (!clk_has_parent(dc->clk, clk)) + return -EINVAL; + state->clk = clk; state->pclk = pclk; state->div = div; @@ -1294,9 +1303,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { .disable = tegra_crtc_disable, .mode_fixup = tegra_crtc_mode_fixup, - .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = tegra_crtc_mode_set_nofb, - .mode_set_base = drm_helper_crtc_mode_set_base, .prepare = tegra_crtc_prepare, .commit = tegra_crtc_commit, .atomic_check = tegra_crtc_atomic_check, @@ -1631,7 +1638,6 @@ static int tegra_dc_init(struct host1x_client *client) struct tegra_drm *tegra = drm->dev_private; struct drm_plane *primary = NULL; struct drm_plane *cursor = NULL; - unsigned int syncpt; u32 value; int err; @@ -1700,13 +1706,15 @@ static int tegra_dc_init(struct host1x_client *client) } /* initialize display controller */ - if (dc->pipe) - syncpt = SYNCPT_VBLANK1; - else - syncpt = SYNCPT_VBLANK0; + if (dc->syncpt) { + u32 syncpt = host1x_syncpt_id(dc->syncpt); - tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); + value = SYNCPT_CNTRL_NO_STALL; + tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); + + value = SYNCPT_VSYNC_ENABLE | syncpt; + tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC); + } value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); @@ -1874,6 +1882,7 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc) static int tegra_dc_probe(struct platform_device *pdev) { + unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED; const struct of_device_id *id; struct resource *regs; struct tegra_dc *dc; @@ -1965,6 +1974,10 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; } + dc->syncpt = host1x_syncpt_request(&pdev->dev, flags); + if (!dc->syncpt) + dev_warn(&pdev->dev, "failed to allocate syncpoint\n"); + platform_set_drvdata(pdev, dc); return 0; @@ -1975,6 +1988,8 @@ static int tegra_dc_remove(struct platform_device *pdev) struct tegra_dc *dc = platform_get_drvdata(pdev); int err; + host1x_syncpt_free(dc->syncpt); + err = host1x_client_unregister(&dc->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 705c93b..55792da 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -12,6 +12,8 @@ #define DC_CMD_GENERAL_INCR_SYNCPT 0x000 #define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 +#define SYNCPT_CNTRL_NO_STALL (1 << 8) +#define SYNCPT_CNTRL_SOFT_RESET (1 << 0) #define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002 #define DC_CMD_WIN_A_INCR_SYNCPT 0x008 #define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009 @@ -23,6 +25,7 @@ #define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019 #define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a #define DC_CMD_CONT_SYNCPT_VSYNC 0x028 +#define SYNCPT_VSYNC_ENABLE (1 << 8) #define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031 #define DC_CMD_DISPLAY_COMMAND 0x032 #define DISP_CTRL_MODE_STOP (0 << 5) @@ -438,8 +441,4 @@ #define DC_WINBUF_BD_UFLOW_STATUS 0xdca #define DC_WINBUF_CD_UFLOW_STATUS 0xfca -/* synchronization points */ -#define SYNCPT_VBLANK0 26 -#define 
SYNCPT_VBLANK1 27 - #endif /* TEGRA_DC_H */ diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 5f18807..1833abd 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -172,6 +172,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) */ drm->irq_enabled = true; + /* syncpoints are used for full 32-bit hardware VBLANK counters */ + drm->vblank_disable_immediate = true; + drm->max_vblank_count = 0xffffffff; + err = drm_vblank_init(drm, drm->mode_config.num_crtc); if (err < 0) goto device; @@ -813,12 +817,12 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe) { struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct tegra_dc *dc = to_tegra_dc(crtc); if (!crtc) return 0; - /* TODO: implement real hardware counter using syncpoints */ - return drm_crtc_vblank_count(crtc); + return tegra_dc_get_vblank_counter(dc); } static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) @@ -879,8 +883,18 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) return 0; } +static int tegra_debugfs_iova(struct seq_file *s, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct drm_device *drm = node->minor->dev; + struct tegra_drm *tegra = drm->dev_private; + + return drm_mm_dump_table(s, &tegra->mm); +} + static struct drm_info_list tegra_debugfs_list[] = { { "framebuffers", tegra_debugfs_framebuffers, 0 }, + { "iova", tegra_debugfs_iova, 0 }, }; static int tegra_debugfs_init(struct drm_minor *minor) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 8cb2dfe..659b2fc 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -106,6 +106,7 @@ struct tegra_output; struct tegra_dc { struct host1x_client client; + struct host1x_syncpt *syncpt; struct device *dev; spinlock_t lock; @@ -180,12 +181,11 @@ struct tegra_dc_window { }; /* from dc.c */ +u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc); void tegra_dc_enable_vblank(struct tegra_dc *dc); void tegra_dc_disable_vblank(struct tegra_dc *dc); void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); void tegra_dc_commit(struct tegra_dc *dc); -int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, - unsigned long pclk, unsigned int div); int tegra_dc_state_setup_clock(struct tegra_dc *dc, struct drm_crtc_state *crtc_state, struct clk *clk, unsigned long pclk, diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 7eaaee74..06ab178 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -952,7 +952,7 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, } tegra_hdmi_writel(hdmi, - SOR_SEQ_CTL_PU_PC(0) | + SOR_SEQ_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8), @@ -1394,8 +1394,8 @@ static int tegra_hdmi_exit(struct host1x_client *client) tegra_output_exit(&hdmi->output); - clk_disable_unprepare(hdmi->clk); reset_control_assert(hdmi->rst); + clk_disable_unprepare(hdmi->clk); regulator_disable(hdmi->vdd); regulator_disable(hdmi->pll); diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h index 919a19d..a882514 100644 --- a/drivers/gpu/drm/tegra/hdmi.h +++ b/drivers/gpu/drm/tegra/hdmi.h @@ -201,7 +201,7 @@ #define HDMI_NV_PDISP_SOR_CRCB 0x5d #define HDMI_NV_PDISP_SOR_BLANK 0x5e #define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f -#define SOR_SEQ_CTL_PU_PC(x) 
(((x) & 0xf) << 0) +#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0) #define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4) #define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8) #define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12) diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 2afe478..7591d89 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -41,6 +41,8 @@ struct tegra_sor { struct mutex lock; bool enabled; + struct drm_info_list *debugfs_files; + struct drm_minor *minor; struct dentry *debugfs; }; @@ -68,13 +70,12 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output) return container_of(output, struct tegra_sor, output); } -static inline unsigned long tegra_sor_readl(struct tegra_sor *sor, - unsigned long offset) +static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset) { return readl(sor->regs + (offset << 2)); } -static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value, +static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value, unsigned long offset) { writel(value, sor->regs + (offset << 2)); @@ -83,9 +84,9 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, unsigned long value, static int tegra_sor_dp_train_fast(struct tegra_sor *sor, struct drm_dp_link *link) { - unsigned long value; unsigned int i; u8 pattern; + u32 value; int err; /* setup lane parameters */ @@ -202,7 +203,7 @@ static void tegra_sor_update(struct tegra_sor *sor) static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout) { - unsigned long value; + u32 value; value = tegra_sor_readl(sor, SOR_PWM_DIV); value &= ~SOR_PWM_DIV_MASK; @@ -281,7 +282,7 @@ static int tegra_sor_wakeup(struct tegra_sor *sor) static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout) { - unsigned long value; + u32 value; value = tegra_sor_readl(sor, SOR_PWR); value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU; @@ -674,38 +675,195 @@ static const struct file_operations tegra_sor_crc_fops = { .release = tegra_sor_crc_release, }; +static int tegra_sor_show_regs(struct seq_file *s, void *data) +{ + struct drm_info_node *node = s->private; + struct tegra_sor *sor = node->info_ent->data; + +#define DUMP_REG(name) \ + seq_printf(s, "%-38s %#05x %08x\n", #name, name, \ + tegra_sor_readl(sor, name)) + + DUMP_REG(SOR_CTXSW); + DUMP_REG(SOR_SUPER_STATE_0); + DUMP_REG(SOR_SUPER_STATE_1); + DUMP_REG(SOR_STATE_0); + DUMP_REG(SOR_STATE_1); + DUMP_REG(SOR_HEAD_STATE_0(0)); + DUMP_REG(SOR_HEAD_STATE_0(1)); + DUMP_REG(SOR_HEAD_STATE_1(0)); + DUMP_REG(SOR_HEAD_STATE_1(1)); + DUMP_REG(SOR_HEAD_STATE_2(0)); + DUMP_REG(SOR_HEAD_STATE_2(1)); + DUMP_REG(SOR_HEAD_STATE_3(0)); + DUMP_REG(SOR_HEAD_STATE_3(1)); + DUMP_REG(SOR_HEAD_STATE_4(0)); + DUMP_REG(SOR_HEAD_STATE_4(1)); + DUMP_REG(SOR_HEAD_STATE_5(0)); + DUMP_REG(SOR_HEAD_STATE_5(1)); + DUMP_REG(SOR_CRC_CNTRL); + DUMP_REG(SOR_DP_DEBUG_MVID); + DUMP_REG(SOR_CLK_CNTRL); + DUMP_REG(SOR_CAP); + DUMP_REG(SOR_PWR); + DUMP_REG(SOR_TEST); + DUMP_REG(SOR_PLL_0); + DUMP_REG(SOR_PLL_1); + DUMP_REG(SOR_PLL_2); + DUMP_REG(SOR_PLL_3); + DUMP_REG(SOR_CSTM); + DUMP_REG(SOR_LVDS); + DUMP_REG(SOR_CRC_A); + DUMP_REG(SOR_CRC_B); + DUMP_REG(SOR_BLANK); + DUMP_REG(SOR_SEQ_CTL); + DUMP_REG(SOR_LANE_SEQ_CTL); + DUMP_REG(SOR_SEQ_INST(0)); + DUMP_REG(SOR_SEQ_INST(1)); + DUMP_REG(SOR_SEQ_INST(2)); + DUMP_REG(SOR_SEQ_INST(3)); + DUMP_REG(SOR_SEQ_INST(4)); + DUMP_REG(SOR_SEQ_INST(5)); + DUMP_REG(SOR_SEQ_INST(6)); + DUMP_REG(SOR_SEQ_INST(7)); + DUMP_REG(SOR_SEQ_INST(8)); + 
DUMP_REG(SOR_SEQ_INST(9)); + DUMP_REG(SOR_SEQ_INST(10)); + DUMP_REG(SOR_SEQ_INST(11)); + DUMP_REG(SOR_SEQ_INST(12)); + DUMP_REG(SOR_SEQ_INST(13)); + DUMP_REG(SOR_SEQ_INST(14)); + DUMP_REG(SOR_SEQ_INST(15)); + DUMP_REG(SOR_PWM_DIV); + DUMP_REG(SOR_PWM_CTL); + DUMP_REG(SOR_VCRC_A_0); + DUMP_REG(SOR_VCRC_A_1); + DUMP_REG(SOR_VCRC_B_0); + DUMP_REG(SOR_VCRC_B_1); + DUMP_REG(SOR_CCRC_A_0); + DUMP_REG(SOR_CCRC_A_1); + DUMP_REG(SOR_CCRC_B_0); + DUMP_REG(SOR_CCRC_B_1); + DUMP_REG(SOR_EDATA_A_0); + DUMP_REG(SOR_EDATA_A_1); + DUMP_REG(SOR_EDATA_B_0); + DUMP_REG(SOR_EDATA_B_1); + DUMP_REG(SOR_COUNT_A_0); + DUMP_REG(SOR_COUNT_A_1); + DUMP_REG(SOR_COUNT_B_0); + DUMP_REG(SOR_COUNT_B_1); + DUMP_REG(SOR_DEBUG_A_0); + DUMP_REG(SOR_DEBUG_A_1); + DUMP_REG(SOR_DEBUG_B_0); + DUMP_REG(SOR_DEBUG_B_1); + DUMP_REG(SOR_TRIG); + DUMP_REG(SOR_MSCHECK); + DUMP_REG(SOR_XBAR_CTRL); + DUMP_REG(SOR_XBAR_POL); + DUMP_REG(SOR_DP_LINKCTL_0); + DUMP_REG(SOR_DP_LINKCTL_1); + DUMP_REG(SOR_LANE_DRIVE_CURRENT_0); + DUMP_REG(SOR_LANE_DRIVE_CURRENT_1); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0); + DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1); + DUMP_REG(SOR_LANE_PREEMPHASIS_0); + DUMP_REG(SOR_LANE_PREEMPHASIS_1); + DUMP_REG(SOR_LANE4_PREEMPHASIS_0); + DUMP_REG(SOR_LANE4_PREEMPHASIS_1); + DUMP_REG(SOR_LANE_POST_CURSOR_0); + DUMP_REG(SOR_LANE_POST_CURSOR_1); + DUMP_REG(SOR_DP_CONFIG_0); + DUMP_REG(SOR_DP_CONFIG_1); + DUMP_REG(SOR_DP_MN_0); + DUMP_REG(SOR_DP_MN_1); + DUMP_REG(SOR_DP_PADCTL_0); + DUMP_REG(SOR_DP_PADCTL_1); + DUMP_REG(SOR_DP_DEBUG_0); + DUMP_REG(SOR_DP_DEBUG_1); + DUMP_REG(SOR_DP_SPARE_0); + DUMP_REG(SOR_DP_SPARE_1); + DUMP_REG(SOR_DP_AUDIO_CTRL); + DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS); + DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5); + DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6); + DUMP_REG(SOR_DP_TPG); + DUMP_REG(SOR_DP_TPG_CONFIG); + DUMP_REG(SOR_DP_LQ_CSTM_0); + DUMP_REG(SOR_DP_LQ_CSTM_1); + DUMP_REG(SOR_DP_LQ_CSTM_2); + +#undef DUMP_REG + + return 0; +} + +static const struct drm_info_list debugfs_files[] = { + { "regs", tegra_sor_show_regs, 0, NULL }, +}; + static int tegra_sor_debugfs_init(struct tegra_sor *sor, struct drm_minor *minor) { struct dentry *entry; + unsigned int i; int err = 0; sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); if (!sor->debugfs) return -ENOMEM; + sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), + GFP_KERNEL); + if (!sor->debugfs_files) { + err = -ENOMEM; + goto remove; + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + sor->debugfs_files[i].data = sor; + + err = drm_debugfs_create_files(sor->debugfs_files, + ARRAY_SIZE(debugfs_files), + sor->debugfs, minor); + if (err < 0) + goto free; + entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, &tegra_sor_crc_fops); if (!entry) { - dev_err(sor->dev, - "cannot create /sys/kernel/debug/dri/%s/sor/crc\n", - minor->debugfs_root->d_name.name); err = -ENOMEM; - goto remove; + goto free; } return err; +free: + kfree(sor->debugfs_files); + sor->debugfs_files = NULL; remove: - debugfs_remove(sor->debugfs); + debugfs_remove_recursive(sor->debugfs); sor->debugfs = NULL; return err; } static void tegra_sor_debugfs_exit(struct tegra_sor *sor) { - debugfs_remove_recursive(sor->debugfs); + 
drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files), + sor->minor); + sor->minor = NULL; + + kfree(sor->debugfs_files); sor->debugfs = NULL; + + debugfs_remove_recursive(sor->debugfs); + sor->debugfs_files = NULL; } static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode) @@ -791,8 +949,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, struct tegra_sor_config config; struct drm_dp_link link; struct drm_dp_aux *aux; - unsigned long value; int err = 0; + u32 value; mutex_lock(&sor->lock); @@ -1354,12 +1512,30 @@ static int tegra_sor_init(struct host1x_client *client) } } + /* + * XXX: Remove this reset once proper hand-over from firmware to + * kernel is possible. + */ + err = reset_control_assert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to assert SOR reset: %d\n", err); + return err; + } + err = clk_prepare_enable(sor->clk); if (err < 0) { dev_err(sor->dev, "failed to enable clock: %d\n", err); return err; } + usleep_range(1000, 3000); + + err = reset_control_deassert(sor->rst); + if (err < 0) { + dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); + return err; + } + err = clk_prepare_enable(sor->clk_safe); if (err < 0) return err; diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile new file mode 100644 index 0000000..1055cb7 --- /dev/null +++ b/drivers/gpu/drm/vgem/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Iinclude/drm +vgem-y := vgem_drv.o vgem_dma_buf.o + +obj-$(CONFIG_DRM_VGEM) += vgem.o diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c new file mode 100644 index 0000000..0254438 --- /dev/null +++ b/drivers/gpu/drm/vgem/vgem_dma_buf.c @@ -0,0 +1,94 @@ +/* + * Copyright © 2012 Intel Corporation + * Copyright © 2014 The Chromium OS Authors + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
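+ *
+ * This file provides the dma-buf/PRIME side of vgem: pinning object
+ * pages, exporting them as a scatter/gather table, kernel vmap/vunmap,
+ * and importing externally created dma-bufs.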
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c new file mode 100644 index 0000000..0254438 --- /dev/null +++ b/drivers/gpu/drm/vgem/vgem_dma_buf.c @@ -0,0 +1,94 @@ +/* + * Copyright © 2012 Intel Corporation + * Copyright © 2014 The Chromium OS Authors + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky <ben@bwidawsk.net> + * + */ + +#include <linux/dma-buf.h> +#include "vgem_drv.h" + +struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj) +{ + struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); + BUG_ON(obj->pages == NULL); + + return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE); +} + +int vgem_gem_prime_pin(struct drm_gem_object *gobj) +{ + struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); + return vgem_gem_get_pages(obj); +} + +void vgem_gem_prime_unpin(struct drm_gem_object *gobj) +{ + struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); + vgem_gem_put_pages(obj); +} + +void *vgem_gem_prime_vmap(struct drm_gem_object *gobj) +{ + struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); + BUG_ON(obj->pages == NULL); + + return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); +} + +void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + vunmap(vaddr); +} + +struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct drm_vgem_gem_object *obj = NULL; + int ret; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (obj == NULL) { + ret = -ENOMEM; + goto fail; + } + + ret = drm_gem_object_init(dev, &obj->base, dma_buf->size); + if (ret) + goto fail_free; + + get_dma_buf(dma_buf); + + obj->base.dma_buf = dma_buf; + obj->use_dma_buf = true; + + return &obj->base; + +fail_free: + kfree(obj); +fail: + return ERR_PTR(ret); +}
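Since vgem objects are backed by ordinary shmem pages, vgem_gem_prime_vmap() above can hand a kernel importer one contiguous mapping of the whole object. A hedged sketch of that consumer side, assuming dmabuf was exported by vgem through drm_gem_prime_export() (illustrative only, with a hypothetical helper name):

	#include <linux/dma-buf.h>
	#include <linux/string.h>

	/* Zero an imported vgem buffer through its kernel mapping; the
	 * dma-buf ops route these calls to vgem_gem_prime_vmap() and
	 * vgem_gem_prime_vunmap(). */
	static int vgem_buf_clear(struct dma_buf *dmabuf)
	{
		void *vaddr = dma_buf_vmap(dmabuf);

		if (!vaddr)
			return -ENOMEM;

		memset(vaddr, 0, dmabuf->size);
		dma_buf_vunmap(dmabuf, vaddr);

		return 0;
	}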
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c new file mode 100644 index 0000000..cb3b435 --- /dev/null +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -0,0 +1,364 @@ +/* + * Copyright 2011 Red Hat, Inc. + * Copyright © 2014 The Chromium OS Authors + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Adam Jackson <ajax@redhat.com> + * Ben Widawsky <ben@bwidawsk.net> + */ + +/** + * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's + * software renderer and the X server for efficient buffer sharing. + */ + +#include <linux/module.h> +#include <linux/ramfs.h> +#include <linux/shmem_fs.h> +#include <linux/dma-buf.h> +#include "vgem_drv.h" + +#define DRIVER_NAME "vgem" +#define DRIVER_DESC "Virtual GEM provider" +#define DRIVER_DATE "20120112" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +void vgem_gem_put_pages(struct drm_vgem_gem_object *obj) +{ + drm_gem_put_pages(&obj->base, obj->pages, false, false); + obj->pages = NULL; +} + +static void vgem_gem_free_object(struct drm_gem_object *obj) +{ + struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); + + drm_gem_free_mmap_offset(obj); + + if (vgem_obj->use_dma_buf && obj->dma_buf) { + dma_buf_put(obj->dma_buf); + obj->dma_buf = NULL; + } + + drm_gem_object_release(obj); + + if (vgem_obj->pages) + vgem_gem_put_pages(vgem_obj); + + vgem_obj->pages = NULL; + + kfree(vgem_obj); +} + +int vgem_gem_get_pages(struct drm_vgem_gem_object *obj) +{ + struct page **pages; + + if (obj->pages || obj->use_dma_buf) + return 0; + + pages = drm_gem_get_pages(&obj->base); + if (IS_ERR(pages)) { + return PTR_ERR(pages); + } + + obj->pages = pages; + + return 0; +} + +static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_vgem_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->base.dev; + loff_t num_pages; + pgoff_t page_offset; + int ret; + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); + + if (page_offset >= num_pages) + return VM_FAULT_SIGBUS; + + mutex_lock(&dev->struct_mutex); + + ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, + obj->pages[page_offset]); + + mutex_unlock(&dev->struct_mutex); + switch (ret) { + case 0: + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + case -EBUSY: + return VM_FAULT_RETRY; + case -EFAULT: + case -EINVAL: + return VM_FAULT_SIGBUS; + default: + WARN_ON(1); + return VM_FAULT_SIGBUS; + } +} + +static struct vm_operations_struct vgem_gem_vm_ops = { + .fault = vgem_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +/* ioctls */ + +static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, + struct drm_file *file, + unsigned int *handle, + unsigned long size) +{ + struct drm_vgem_gem_object *obj; + struct drm_gem_object *gem_object; + int err; + + size = roundup(size, PAGE_SIZE); + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + gem_object = &obj->base; + + err = drm_gem_object_init(dev, gem_object, size); + if (err) + goto out; + + err = drm_gem_handle_create(file, gem_object, handle); + if (err) + goto handle_out; + + drm_gem_object_unreference_unlocked(gem_object); + + return gem_object; + +handle_out: + drm_gem_object_release(gem_object); +out: + kfree(obj); + return ERR_PTR(err); +} + +static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_object *gem_object; + uint64_t size; + uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + + size = args->height * pitch; + if (size == 0) + return -EINVAL; + + gem_object = vgem_gem_create(dev, file, &args->handle, size); + + if (IS_ERR(gem_object)) { + DRM_DEBUG_DRIVER("object creation failed\n"); + return PTR_ERR(gem_object); + } + + args->size = gem_object->size; + args->pitch = pitch; + + DRM_DEBUG_DRIVER("Created object of size %llu\n", size); + + return 0; +} +
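vgem_gem_dumb_create() above and vgem_gem_dumb_map() just below are wired up through the generic dumb-buffer entry points, so allocating and mapping a vgem buffer needs no driver-specific ioctl. A minimal userspace sketch, assuming an open vgem node fd (the caller checks the result against MAP_FAILED):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>

	/* Allocate a w x h, 32bpp dumb buffer and map it into the process. */
	static void *vgem_map_dumb(int fd, uint32_t w, uint32_t h)
	{
		struct drm_mode_create_dumb create;
		struct drm_mode_map_dumb map;

		memset(&create, 0, sizeof(create));
		create.width = w;
		create.height = h;
		create.bpp = 32;
		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
			return MAP_FAILED;	/* vgem_gem_dumb_create() */

		memset(&map, 0, sizeof(map));
		map.handle = create.handle;
		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return MAP_FAILED;	/* vgem_gem_dumb_map() */

		/* the fake offset routes the mapping to vgem_drm_gem_mmap() */
		return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, map.offset);
	}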
+int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset) +{ + int ret = 0; + struct drm_gem_object *obj; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file, handle); + if (!obj) { + ret = -ENOENT; + goto unlock; + } + + if (!drm_vma_node_has_offset(&obj->vma_node)) { + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto unref; + } + + BUG_ON(!obj->filp); + + obj->filp->private_data = obj; + + ret = vgem_gem_get_pages(to_vgem_bo(obj)); + if (ret) + goto fail_get_pages; + + *offset = drm_vma_node_offset_addr(&obj->vma_node); + + goto unref; + +fail_get_pages: + drm_gem_free_mmap_offset(obj); +unref: + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_vma_offset_node *node; + struct drm_gem_object *obj; + struct drm_vgem_gem_object *vgem_obj; + int ret = 0; + + mutex_lock(&dev->struct_mutex); + + node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (!node) { + ret = -EINVAL; + goto out_unlock; + } else if (!drm_vma_node_is_allowed(node, filp)) { + ret = -EACCES; + goto out_unlock; + } + + obj = container_of(node, struct drm_gem_object, vma_node); + + vgem_obj = to_vgem_bo(obj); + + if (obj->dma_buf && vgem_obj->use_dma_buf) { + ret = dma_buf_mmap(obj->dma_buf, vma, 0); + goto out_unlock; + } + + if (!obj->dev->driver->gem_vm_ops) { + ret = -EINVAL; + goto out_unlock; + } + + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = obj->dev->driver->gem_vm_ops; + vma->vm_private_data = vgem_obj; + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + + mutex_unlock(&dev->struct_mutex); + drm_gem_vm_open(vma); + return ret; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + + +static struct drm_ioctl_desc vgem_ioctls[] = { +}; + +static const struct file_operations vgem_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .mmap = vgem_drm_gem_mmap, + .poll = drm_poll, + .read = drm_read, + .unlocked_ioctl = drm_ioctl, + .release = drm_release, +}; + +static struct drm_driver vgem_driver = { + .driver_features = DRIVER_GEM | DRIVER_PRIME, + .gem_free_object = vgem_gem_free_object, + .gem_vm_ops = &vgem_gem_vm_ops, + .ioctls = vgem_ioctls, + .fops = &vgem_driver_fops, + .dumb_create = vgem_gem_dumb_create, + .dumb_map_offset = vgem_gem_dumb_map, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = vgem_gem_prime_import, + .gem_prime_pin = vgem_gem_prime_pin, + .gem_prime_unpin = vgem_gem_prime_unpin, + .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table, + .gem_prime_vmap = vgem_gem_prime_vmap, + .gem_prime_vunmap = vgem_gem_prime_vunmap, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +struct drm_device *vgem_device; + +static int __init vgem_init(void) +{ + int ret; + + vgem_device = drm_dev_alloc(&vgem_driver, NULL); + if (!vgem_device) { + ret = -ENOMEM; + goto out; + } + + ret = drm_dev_register(vgem_device, 0); + + if (ret) + goto out_unref; + + return 0; + +out_unref: + drm_dev_unref(vgem_device); +out: + return ret; +} + +static void __exit vgem_exit(void) +{ + 
drm_dev_unregister(vgem_device); + drm_dev_unref(vgem_device); +} + +module_init(vgem_init); +module_exit(vgem_exit); + +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h new file mode 100644 index 0000000..57ab4d8 --- /dev/null +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -0,0 +1,57 @@ +/* + * Copyright © 2012 Intel Corporation + * Copyright © 2014 The Chromium OS Authors + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky <ben@bwidawsk.net> + * + */ + +#ifndef _VGEM_DRV_H_ +#define _VGEM_DRV_H_ + +#include <drm/drmP.h> +#include <drm/drm_gem.h> + +#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) +struct drm_vgem_gem_object { + struct drm_gem_object base; + struct page **pages; + bool use_dma_buf; +}; + +/* vgem_drv.c */ +extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); +extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); + +/* vgem_dma_buf.c */ +extern struct sg_table *vgem_gem_prime_get_sg_table( + struct drm_gem_object *gobj); +extern int vgem_gem_prime_pin(struct drm_gem_object *gobj); +extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj); +extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj); +extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e13b9cb..620bb5c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -134,7 +134,7 @@ */ #define VMW_IOCTL_DEF(ioctl, func, flags) \ - [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} /** * Ioctl definitions. 
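The VMW_IOCTL_DEF() change above moves vmwgfx onto the generic struct drm_ioctl_desc layout, which keeps the full ioctl number in a single cmd field instead of vmwgfx's extra cmd_drv copy; the hunk below adjusts the runtime check to match. A rough sketch of the lookup-and-validate pattern involved, with a hypothetical helper name (illustrative only):

	#include <drm/drmP.h>

	/* Map a driver-private ioctl number to its descriptor and verify
	 * that the full command (including size/direction bits) matches. */
	static const struct drm_ioctl_desc *
	vmw_lookup_ioctl(const struct drm_ioctl_desc *table, unsigned int cmd)
	{
		unsigned int nr = DRM_IOCTL_NR(cmd);
		const struct drm_ioctl_desc *desc;

		if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
			return NULL;

		desc = &table[nr - DRM_COMMAND_BASE];
		if (desc->cmd != cmd)
			return NULL;

		return desc;
	}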
@@ -1044,7 +1044,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, const struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; - if (unlikely(ioctl->cmd_drv != cmd)) { + if (unlikely(ioctl->cmd != cmd)) { DRM_ERROR("Invalid command format, ioctl %d\n", nr - DRM_COMMAND_BASE); return -EINVAL; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index b10550e..6b7fdc1 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -425,6 +425,12 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) } EXPORT_SYMBOL(host1x_syncpt_read_min); +u32 host1x_syncpt_read(struct host1x_syncpt *sp) +{ + return host1x_syncpt_load(sp); +} +EXPORT_SYMBOL(host1x_syncpt_read); + int host1x_syncpt_nb_pts(struct host1x *host) { return host->info->nb_pts; diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index 3ddfb3d..2970c6b 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -441,8 +441,7 @@ static void ipu_di_config_clock(struct ipu_di *di, in_rate = clk_get_rate(clk); div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock); - if (div == 0) - div = 1; + div = clamp(div, 1U, 255U); clkgen0 = div << 4; } @@ -459,8 +458,7 @@ static void ipu_di_config_clock(struct ipu_di *di, clkrate = clk_get_rate(di->clk_ipu); div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); - if (div == 0) - div = 1; + div = clamp(div, 1U, 255U); rate = clkrate / div; error = rate / (sig->mode.pixelclock / 1000); @@ -483,8 +481,7 @@ static void ipu_di_config_clock(struct ipu_di *di, in_rate = clk_get_rate(clk); div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock); - if (div == 0) - div = 1; + div = clamp(div, 1U, 255U); clkgen0 = div << 4; } diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c index ad75588..1dcb96c 100644 --- a/drivers/gpu/ipu-v3/ipu-ic.c +++ b/drivers/gpu/ipu-v3/ipu-ic.c @@ -297,8 +297,8 @@ static int calc_resize_coeffs(struct ipu_ic *ic, return -EINVAL; } - /* Cannot downsize more than 8:1 */ - if ((out_size << 3) < in_size) { + /* Cannot downsize more than 4:1 */ + if ((out_size << 2) < in_size) { dev_err(ipu->dev, "Unsupported downsize\n"); return -EINVAL; }
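The ipu-di.c hunks above replace the bare divide-by-zero guard with clamp(div, 1U, 255U), presumably because the clock generator's integer divider field is 8 bits wide and larger values would wrap once shifted into the register. A short sketch of the resulting derivation, with a hypothetical helper name (illustrative only):

	#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST(), clamp() */

	/* Derive the CLKGEN0 value from the input clock and pixel clock;
	 * the integer divider is shifted up by 4 bits, as in
	 * ipu_di_config_clock(). */
	static u32 di_clkgen0(unsigned long in_rate, unsigned long pixelclock)
	{
		u32 div = DIV_ROUND_CLOSEST(in_rate, pixelclock);

		/* keep the divider inside what the register field can hold */
		div = clamp(div, 1U, 255U);

		return div << 4;
	}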