diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 80e742a1c162..5a9e12bd6541 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,7 +28,7 @@ struct page;
 
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
-#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 			struct page *pg);
 
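
Note: clear_user_page() keeps the prototype expected by the generic highmem
wrappers, so no caller changes are needed. The generic non-highmem caller in
include/linux/highmem.h is, roughly (sketch from memory, simplified):

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}

On parisc, kunmap()/kunmap_atomic() flush the kernel mapping of the page,
which is why the comments in cache.c below defer kernel-mapping coherency
to kunmap.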
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 977f0a4f5ecf..7edb48530c64 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -443,16 +443,36 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
+void clear_user_page(void *vto, unsigned long vaddr, struct page *pg)
+{
+	/* Flush `to' page using a mapping equivalent to the user mapping
+	 * if the CPU requires coherency. This is necessary to ensure
+	 * coherency with the kernel mapping. */
+	if (parisc_requires_coherency())
+		flush_dcache_page_asm(__pa(vto), vaddr);
+
+	/* Clear page using kernel mapping.  The kernel mapping is flushed
+	 * in kunmap. */
+	clear_page_asm(vto);
+}
+EXPORT_SYMBOL(clear_user_page);
+
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 	struct page *pg)
 {
-       /* Copy using kernel mapping.  No coherency is needed (all in
-	  kunmap) for the `to' page.  However, the `from' page needs to
-	  be flushed through a mapping equivalent to the user mapping
-	  before it can be accessed through the kernel mapping. */
-	preempt_disable();
+	/* Flush `to' page using a mapping equivalent to the user mapping
+	 * if the CPU requires coherency. This is necessary to ensure
+	 * coherency with the kernel mapping. */
+	if (parisc_requires_coherency())
+		flush_dcache_page_asm(__pa(vto), vaddr);
+
+	/* The `from' page needs to be flushed through a mapping equivalent
+	* to the user mapping before it can be accessed through the kernel
+	* mapping. */
 	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	preempt_enable();
+
+	/* Copy page using kernel mapping.  The kernel mappings are flushed
+	 * in kunmap. */
 	copy_page_asm(vto, vfrom);
 }
 EXPORT_SYMBOL(copy_user_page);
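
Note: the conditional flushes above hinge on parisc_requires_coherency(),
which returns true only for CPUs whose caches cannot tolerate inequivalent
aliases. At the time of this patch it reads roughly as follows (from
arch/parisc/include/asm/processor.h; reproduced from memory, treat as a
sketch):

static inline int parisc_requires_coherency(void)
{
#ifdef CONFIG_PA8X00
	/* PA8800/PA8900 (Mako) caches require equivalent aliasing */
	return (boot_cpu_data.cpu_type == mako) ||
		(boot_cpu_data.cpu_type == mako2);
#else
	return 0;
#endif
}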
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187f8951..cc288f113021 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -623,17 +623,9 @@ ENTRY_CFI(copy_user_page_asm)
 	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
 #endif
 
-	/* Purge any old translations */
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r28)
-	pdtlb,l		%r0(%r29)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		%r0(%r28)
-	pdtlb		%r0(%r29)
-	tlb_unlock	%r20,%r21,%r22
-#endif
+	/* Save `to' and `from' alias addresses for the TLB purge below */
+	copy		%r28, %r24
+	copy		%r29, %r25
 
 #ifdef CONFIG_64BIT
 	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
@@ -743,6 +735,19 @@ ENTRY_CFI(copy_user_page_asm)
 	ldo		64(%r29), %r29
 #endif
 
+	/* Purge the tmp alias TLB entries.  We used to purge the entries
+	 * prior to the operation but we need to remove them afterwards
+	 * on systems that only support equivalent aliasing.  */
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r24)
+	pdtlb,l		%r0(%r25)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r24)
+	pdtlb		%r0(%r25)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -771,15 +776,8 @@ ENTRY_CFI(clear_user_page_asm)
 	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif
 
-	/* Purge any old translation */
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r28)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		%r0(%r28)
-	tlb_unlock	%r20,%r21,%r22
-#endif
+	/* Save `to' alias address for the TLB purge below */
+	copy		%r28, %r24
 
 #ifdef CONFIG_64BIT
 	ldi		(PAGE_SIZE / 128), %r1
@@ -829,6 +827,17 @@ ENTRY_CFI(clear_user_page_asm)
 	ldo		64(%r28), %r28
 #endif	/* CONFIG_64BIT */
 
+	/* Purge the tmp alias TLB entry.  We used to purge the entry
+	 * prior to the operation but we need to remove the entry
+	 * afterwards on systems that only support equivalent aliasing.  */
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r24)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r24)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -855,16 +864,6 @@ ENTRY_CFI(flush_dcache_page_asm)
 	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif
 
-	/* Purge any old translation */
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r28)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		%r0(%r28)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), r31
 
@@ -876,6 +875,8 @@ ENTRY_CFI(flush_dcache_page_asm)
 	add		%r28, %r25, %r25
 	sub		%r25, r31, %r25
 
+	/* Save virtual address for the TLB purge below */
+	copy		%r28, %r24
 
 1:      fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
@@ -896,6 +897,18 @@ ENTRY_CFI(flush_dcache_page_asm)
 	fdc,m		r31(%r28)
 
 	sync
+
+	/* Purge the tmp alias TLB entry.  We used to purge the entry
+	 * prior to the operation but we need to remove the entry
+	 * afterwards on systems that only support equivalent aliasing.  */
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r24)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r24)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -922,21 +935,6 @@ ENTRY_CFI(flush_icache_page_asm)
 	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif
 
-	/* Purge any old translation.  Note that the FIC instruction
-	 * may use either the instruction or data TLB.  Given that we
-	 * have a flat address space, it's not clear which TLB will be
-	 * used.  So, we purge both entries.  */
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r28)
-	pitlb,l         %r0(%sr4,%r28)
-#else
-	tlb_lock        %r20,%r21,%r22
-	pdtlb		%r0(%r28)
-	pitlb           %r0(%sr4,%r28)
-	tlb_unlock      %r20,%r21,%r22
-#endif
-
 	ldil		L%icache_stride, %r1
 	ldw		R%icache_stride(%r1), %r31
 
@@ -948,6 +946,8 @@ ENTRY_CFI(flush_icache_page_asm)
 	add		%r28, %r25, %r25
 	sub		%r25, %r31, %r25
 
+	/* Save virtual address for the TLB purge below */
+	copy		%r28, %r24
 
 	/* fic only has the type 26 form on PA1.1, requiring an
 	 * explicit space specification, so use %sr4 */
@@ -970,6 +970,23 @@ ENTRY_CFI(flush_icache_page_asm)
 	fic,m		%r31(%sr4,%r28)
 
 	sync
+
+	/* Purge the tmp alias TLB entries.  Note that the FIC instruction
+	 * may use either the instruction or data TLB.  Given that we
+	 * have a flat address space, it's not clear which TLB will be
+	 * used, so we purge both entries.  We used to purge them prior
+	 * to the operation, but we need to remove them afterwards on
+	 * systems that only support equivalent aliasing.  */
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r24)
+	pitlb,l		%r0(%sr4,%r24)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r24)
+	pitlb		%r0(%sr4,%r24)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	nop
 	.exit

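Note: every tmp-alias routine in pacache.S now follows the same shape: form
the equivalent alias, run the operation through it, then purge the alias
translation. A minimal C-style sketch of the new ordering in
clear_user_page_asm; map_tmp_alias() and purge_tmp_alias() are hypothetical
stand-ins for the alias formation and pdtlb/pitlb sequences:

#include <string.h>

#define PAGE_SIZE	4096

/* Hypothetical helpers standing in for the asm sequences. */
void *map_tmp_alias(unsigned long phys, unsigned long user_vaddr);
void purge_tmp_alias(void *alias);

static void clear_user_page_sketch(unsigned long phys, unsigned long user_vaddr)
{
	void *alias = map_tmp_alias(phys, user_vaddr);	/* equivalent mapping */

	memset(alias, 0, PAGE_SIZE);	/* the clear loop */
	purge_tmp_alias(alias);		/* purge AFTER the operation */
}

Purging before the operation (the old order) left the translation inserted
during the operation live in the TLB after the routine returned; on machines
that only support equivalent aliasing it could then alias inequivalently once
the page is reused. Purging afterwards guarantees no tmp-alias translation
outlives the operation.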