author     Linus Torvalds <torvalds@linux-foundation.org>   2015-01-16 14:28:01 +1300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-01-16 14:28:01 +1300
commit     3fa116e8bdd94b4075cfb1e927f8c1fd3e31f582 (patch)
tree       3720155d4687826bfd78f24c5dfacab61667c6f7
parent     f800c25b7a762d445ba1439a2428c8362157eba6 (diff)
parent     a87e810f61b49f19bd29ea564b7cd1e92e43d989 (diff)
Merge tag 'powerpc-3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc fixes from Michael Ellerman:
 "A few powerpc fixes"

* tag 'powerpc-3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc: Work around gcc bug in current_thread_info()
  cxl: Fix issues when unmapping contexts
  powernv: Fix OPAL tracepoint code
-rw-r--r--  arch/powerpc/include/asm/thread_info.h           13
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S    1
-rw-r--r--  drivers/misc/cxl/context.c                       82
-rw-r--r--  drivers/misc/cxl/file.c                          14
4 files changed, 78 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690..0be6c681cab 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif
#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- /* gcc4, at least, is smart enough to turn this into a single
- * rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+ unsigned long val;
+
+ asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+ return (struct thread_info *)val;
}
#endif /* __ASSEMBLY__ */
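
The thread_info.h change replaces a read of a global register variable, masked in C, with an inline asm statement; per the commit title the old form tripped a gcc bug, and the workaround is to emit the masking instruction itself so the thread_info pointer is recomputed directly from r1 rather than going through the register variable. What lets the same CURRENT_THREAD_INFO macro serve both worlds is stringify_in_c(): in .S files it must expand to a raw instruction, while in C it must become a string literal that can sit inside asm(). A rough sketch of that helper, modelled on the kernel's asm/asm-compat.h definitions of the era (it is not new code in this patch):

/*
 * In assembly files, pass the instruction text through untouched.
 * In C, stringify it so it can be dropped into an asm() statement.
 * The extra __stringify_in_c() level makes the preprocessor expand
 * nested macros such as THREAD_SHIFT *before* stringification.
 */
#ifdef __ASSEMBLY__
#define stringify_in_c(...)	__VA_ARGS__
#else
#define __stringify_in_c(...)	#__VA_ARGS__
#define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
#endif

With that wrapper, the C-side use above expands on ppc64 to roughly asm("clrrdi %0, 1, <THREAD_SHIFT value> " : "=r" (val)), so the compiler has to derive the result from whatever is in r1 at that point.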
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b28..0509bca5e83 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b52437..d1b55fe6281 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
- /* Release Problem State Area mapping */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
* created and torn down after the IDR removed
*/
__detach_context(ctx);
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * Easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
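
The context.c change above is the core of the unmapping fix: instead of mapping the whole problem state area up front with vm_iomap_memory(), cxl_context_iomap() now only installs cxl_mmap_vmops and lets cxl_mmap_fault() insert PFNs on demand, refusing with VM_FAULT_SIGBUS once the context is no longer STARTED. The force-detach path in cxl_context_detach_all() then zaps any existing userspace PTEs with unmap_mapping_range(), so the next access goes back through the fault handler and fails cleanly. A minimal sketch of that revocable-mmap pattern for a hypothetical driver (the foo_* names are illustrative, not cxl code; the APIs are the ~3.19-era ones used in the diff):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

struct foo_dev {
	struct mutex		lock;
	bool			live;		/* cleared on force-detach */
	u64			mmio_phys;	/* physical base of the MMIO window */
	u64			mmio_size;
	struct address_space	*mapping;	/* file->f_mapping, saved at open() */
};

/* Nothing is mapped at mmap() time; every access goes through here. */
static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct foo_dev *dev = vma->vm_file->private_data;
	u64 offset = (u64)vmf->pgoff << PAGE_SHIFT;
	int ret = VM_FAULT_SIGBUS;

	mutex_lock(&dev->lock);
	if (dev->live && offset < dev->mmio_size) {
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
			      (dev->mmio_phys + offset) >> PAGE_SHIFT);
		ret = VM_FAULT_NOPAGE;
	}
	mutex_unlock(&dev->lock);
	return ret;
}

static const struct vm_operations_struct foo_vmops = {
	.fault = foo_fault,
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO | VM_PFNMAP;	/* required for vm_insert_pfn() */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &foo_vmops;
	return 0;
}

/* Force-detach path: zap every PTE so future accesses re-fault and fail. */
static void foo_revoke(struct foo_dev *dev)
{
	mutex_lock(&dev->lock);
	dev->live = false;
	mutex_unlock(&dev->lock);
	if (dev->mapping)
		unmap_mapping_range(dev->mapping, 0, 0, 1);
}

In the patch itself the zap step now lives only in cxl_context_detach_all(), the force-detach path, rather than in __detach_context(), as the added comment explains.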
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb3..b15d8113877 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid