Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/cxl/context.c              | 82
-rw-r--r--  drivers/misc/cxl/file.c                 | 14
-rw-r--r--  drivers/misc/mei/hw-me.c                | 12
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 16
4 files changed, 91 insertions, 33 deletions
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b5..d1b55fe 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct cxl_context *ctx = vma->vm_file->private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	u64 area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+
+	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+			__func__, ctx->pe, address, offset);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		area = ctx->afu->psn_phys;
+		if (offset > ctx->afu->adapter->ps_size)
+			return VM_FAULT_SIGBUS;
+	} else {
+		area = ctx->psn_phys;
+		if (offset > ctx->psn_size)
+			return VM_FAULT_SIGBUS;
+	}
+
+	mutex_lock(&ctx->status_mutex);
+
+	if (ctx->status != STARTED) {
+		mutex_unlock(&ctx->status_mutex);
+		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+	mutex_unlock(&ctx->status_mutex);
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+	.fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	u64 len = vma->vm_end - vma->vm_start;
 	len = min(len, ctx->psn_size);
 
-	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-	}
+	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+		/* make sure there is a valid per process space for this AFU */
+		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+			pr_devel("AFU doesn't support mmio space\n");
+			return -EINVAL;
+		}
 
-	/* make sure there is a valid per process space for this AFU */
-	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-		pr_devel("AFU doesn't support mmio space\n");
-		return -EINVAL;
+		/* Can't mmap until the AFU is enabled */
+		if (!ctx->afu->enabled)
+			return -EBUSY;
 	}
 
-	/* Can't mmap until the AFU is enabled */
-	if (!ctx->afu->enabled)
-		return -EBUSY;
-
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);
 
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return vm_iomap_memory(vma, ctx->psn_phys, len);
+	vma->vm_ops = &cxl_mmap_vmops;
+	return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
-
-	/* Release Problem State Area mapping */
-	mutex_lock(&ctx->mapping_lock);
-	if (ctx->mapping)
-		unmap_mapping_range(ctx->mapping, 0, 0, 1);
-	mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * created and torn down after the IDR removed
 		 */
 		__detach_context(ctx);
+
+		/*
+		 * We are force detaching - remove any active PSA mappings so
+		 * userspace cannot interfere with the card if it comes back.
+		 * Easiest way to exercise this is to unbind and rebind the
+		 * driver via sysfs while it is in use.
+		 */
+		mutex_lock(&ctx->mapping_lock);
+		if (ctx->mapping)
+			unmap_mapping_range(ctx->mapping, 0, 0, 1);
+		mutex_unlock(&ctx->mapping_lock);
 	}
 	mutex_unlock(&afu->contexts_lock);
 }
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10..b15d811 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
 	pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-	mutex_lock(&ctx->status_mutex);
-	if (ctx->status != OPENED) {
-		rc = -EIO;
-		goto out;
-	}
-
+	/* Do this outside the status_mutex to avoid a circular dependency with
+	 * the locking in cxl_mmap_fault() */
 	if (copy_from_user(&work, uwork,
 			   sizeof(struct cxl_ioctl_start_work))) {
 		rc = -EFAULT;
 		goto out;
 	}
 
+	mutex_lock(&ctx->status_mutex);
+	if (ctx->status != OPENED) {
+		rc = -EIO;
+		goto out;
+	}
+
 	/*
 	 * if any of the reserved fields are set or any of the unused
 	 * flags are set it's invalid
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index ff27550..06ff0a2 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 hcsr = mei_hcsr_read(hw);
 
+	/* H_RST may be found lit before reset is started,
+	 * for example if preceding reset flow hasn't completed.
+	 * In that case asserting H_RST will be ignored, therefore
+	 * we need to clean H_RST bit to start a successful reset sequence.
+	 */
+	if ((hcsr & H_RST) == H_RST) {
+		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+		hcsr &= ~H_RST;
+		mei_me_reg_write(hw, H_CSR, hcsr);
+		hcsr = mei_hcsr_read(hw);
+	}
+
 	hcsr |= H_RST | H_IG | H_IS;
 
 	if (intr_enable)
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 7aaaf51..35f19a6 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 		to_copy = size - bytes_copied;
 
 		if (is_iovec) {
-			struct iovec *iov = (struct iovec *)src;
+			struct msghdr *msg = (struct msghdr *)src;
 			int err;
 
 			/* The iovec will track bytes_copied internally. */
-			err = memcpy_fromiovec((u8 *)va + page_offset,
-					       iov, to_copy);
+			err = memcpy_from_msg((u8 *)va + page_offset,
+					      msg, to_copy);
 			if (err != 0) {
 				if (kernel_if->host)
 					kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
  */
 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
 				  u64 queue_offset,
-				  const void *src,
+				  const void *msg,
 				  size_t src_offset, size_t size)
 {
 
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
 	 * We ignore src_offset because src is really a struct iovec * and will
 	 * maintain offset internally.
 	 */
-	return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
 /*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
  *	of bytes enqueued or < 0 on error.
  */
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-			  void *iov,
+			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
 {
 	ssize_t result;
 
-	if (!qpair || !iov)
+	if (!qpair)
 		return VMCI_ERROR_INVALID_ARGS;
 
 	qp_lock(qpair);
 
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
 		result = qp_enqueue_locked(qpair->produce_q,
 					   qpair->consume_q,
 					   qpair->produce_q_size,
-					   iov, iov_size,
+					   msg, iov_size,
 					   qp_memcpy_to_queue_iov);
 
 	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
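
The core of the cxl change above is the switch from mapping the whole problem state area eagerly with vm_iomap_memory() to installing PFNs on demand from a .fault handler. Below is a minimal sketch of that same deferred-mapping pattern for a hypothetical character driver; my_ctx, my_mmap, and their fields are illustrative names, not part of this patch, and the fault-handler signature matches the kernel API the diff itself uses (vmf->virtual_address era).

/*
 * Sketch only: mmap() installs no PTEs, just vm_ops; each page of the
 * MMIO window is inserted on first touch, so the driver can refuse
 * access (SIGBUS) once the device state is no longer valid.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_ctx {			/* illustrative per-open-file state */
	phys_addr_t mmio_phys;	/* base of the device MMIO window */
	size_t mmio_size;	/* size of that window */
	bool started;		/* plays the role of ctx->status == STARTED */
};

static int my_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_ctx *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 offset = (u64)vmf->pgoff << PAGE_SHIFT;

	/* Out-of-range offset or a stopped device: fail this access. */
	if (offset >= ctx->mmio_size || !ctx->started)
		return VM_FAULT_SIGBUS;

	/* VM_PFNMAP vmas carry raw PFNs with no struct page behind them. */
	vm_insert_pfn(vma, address, (ctx->mmio_phys + offset) >> PAGE_SHIFT);

	return VM_FAULT_NOPAGE;	/* PTE installed; nothing more to do */
}

static const struct vm_operations_struct my_mmap_vmops = {
	.fault = my_mmap_fault,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Defer all mapping work to the fault path. */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &my_mmap_vmops;
	return 0;
}

Because nothing is mapped until first touch, a later unmap_mapping_range(mapping, 0, 0, 1), as cxl_context_detach_all() now issues, tears down every PTE, and the next userspace access re-enters the fault handler, which can then deny the mapping. This is also why afu_ioctl_start_work() must take status_mutex after copy_from_user(): the fault path can run during the copy and takes the same mutex.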