author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-20 15:03:45 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-20 15:03:45 -0800
commit    d5bdaf4f68f0590fc481bca54bcaffeb27b75fca (patch)
tree      a3c02e146c8c3b33e77e212f15d4851b6febd26b /arch/alpha/lib
parent    dc6ec87d4c77029457a79fd6fa9b6f8304dc8348 (diff)
parent    5cfe8f1ba5eebe6f4b6e5858cdb1a5be4f3272a6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha
Pull alpha updates from Matt Turner:
 "It contains a few fixes and some work from Richard to make alpha
  emulation under QEMU much more usable"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
  alpha: Prevent a NULL ptr dereference in csum_partial_copy.
  alpha: perf: fix out-of-bounds array access triggered from raw event
  alpha: Use qemu+cserve provided high-res clock and alarm.
  alpha: Switch to GENERIC_CLOCKEVENTS
  alpha: Enable the rpcc clocksource for single processor
  alpha: Reorganize rtc handling
  alpha: Primitive support for CPU power down.
  alpha: Allow HZ to be configured
  alpha: Notice if we're being run under QEMU
  alpha: Eliminate compiler warning from memset macro
Diffstat (limited to 'arch/alpha/lib')
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c  10
-rw-r--r--  arch/alpha/lib/ev6-memset.S         12
-rw-r--r--  arch/alpha/lib/memset.S             11
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index ffb19b7da999..ff3c10721caf 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -130,7 +130,7 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
*dst = word | tmp;
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -185,7 +185,7 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
*dst = word | tmp;
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -242,7 +242,7 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
stq_u(partial_dest | second_dest, dst);
out:
checksum += carry;
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
stq_u(partial_dest | word | second_dest, dst);
checksum += carry;
}
- if (err) *errp = err;
+ if (err && errp) *errp = err;
return checksum;
}
@@ -339,7 +339,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
if (len) {
if (!access_ok(VERIFY_READ, src, len)) {
- *errp = -EFAULT;
+ if (errp) *errp = -EFAULT;
memset(dst, 0, len);
return sum;
}
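The five hunks above all add the same guard: the error code is stored through errp only when the caller actually supplied an error pointer, which is what the "alpha: Prevent a NULL ptr dereference in csum_partial_copy" entry in the pull message refers to. A minimal C sketch of the pattern, using hypothetical helper and variable names rather than the kernel's real routines:

/* Hedged sketch, not the kernel's code: names here are hypothetical.
 * The point is the errp guard added in the hunks above. */
#include <stddef.h>

static unsigned long
copy_words_and_sum(const unsigned long *src, unsigned long *dst,
		   long len, unsigned long checksum, int *errp)
{
	int err = 0;			/* set by a faulting load in the real code */

	while (len > 0) {
		unsigned long word = *src++;

		*dst++ = word;
		checksum += word;	/* the real code also folds the carry */
		len -= 8;
	}

	/* The fix: dereference errp only when one was provided.  A caller
	 * that passes errp == NULL (no error reporting wanted) would have
	 * crashed on the old "if (err) *errp = err;" form. */
	if (err && errp)
		*errp = err;
	return checksum;
}

The last hunk applies the same idea to the access_ok() failure path, where -EFAULT was previously written through errp unconditionally.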
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index d8b94e1c7fca..356bb2fdd705 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -30,14 +30,15 @@
.set noat
.set noreorder
.text
+ .globl memset
.globl __memset
+ .globl ___memset
.globl __memsetw
.globl __constant_c_memset
- .globl memset
- .ent __memset
+ .ent ___memset
.align 5
-__memset:
+___memset:
.frame $30,0,$26,0
.prologue 0
@@ -227,7 +228,7 @@ end_b:
nop
nop
ret $31,($26),1 # L0 :
- .end __memset
+ .end ___memset
/*
* This is the original body of code, prior to replication and
@@ -594,4 +595,5 @@ end_w:
.end __memsetw
-memset = __memset
+memset = ___memset
+__memset = ___memset
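In ev6-memset.S the out-of-line routine is renamed from __memset to ___memset, and both memset and __memset are then provided as assembler aliases for it (the "memset = ___memset" and "__memset = ___memset" lines). This corresponds to the "alpha: Eliminate compiler warning from memset macro" entry in the pull message: on Alpha, memset() calls are rewritten by a macro in asm/string.h, and pointing that macro's out-of-line fallback at a third name, one the compiler does not treat as memset itself, presumably sidesteps the warning while the aliases keep the old symbols resolvable. The header is not part of this diff; a hedged sketch of the general shape, with simplified constant handling:

/* Hedged sketch of an asm/string.h-style wrapper, NOT the kernel's exact
 * header.  The macro rewrites memset() calls at compile time; the real
 * out-of-line code (the .S files in this diff) now answers to ___memset,
 * a name the memset macro never expands to, while memset and __memset
 * remain available as aliases for everything else. */
#include <stddef.h>

extern void *___memset(void *s, int c, size_t n);	/* memset.S / ev6-memset.S */
extern void *__constant_c_memset(void *s, unsigned long c, size_t n);

#define memset(s, c, n)							\
	(__builtin_constant_p(c)					\
	 ? __constant_c_memset((s),					\
		0x0101010101010101UL * (unsigned char)(c), (n))		\
	 : ___memset((s), (c), (n)))

The same rename and aliases are applied to the plain memset.S below.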
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 311b8cfc6914..76ccc6d1f364 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -19,11 +19,13 @@
.text
.globl memset
.globl __memset
+ .globl ___memset
.globl __memsetw
.globl __constant_c_memset
- .ent __memset
+
+ .ent ___memset
.align 5
-__memset:
+___memset:
.frame $30,0,$26,0
.prologue 0
@@ -103,7 +105,7 @@ within_one_quad:
end:
ret $31,($26),1 /* E1 */
- .end __memset
+ .end ___memset
.align 5
.ent __memsetw
@@ -121,4 +123,5 @@ __memsetw:
.end __memsetw
-memset = __memset
+memset = ___memset
+__memset = ___memset
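memset.S gets the identical treatment. At the assembler level, the trailing "memset = ___memset" and "__memset = ___memset" lines simply define additional symbols with the same value, so all three names resolve to the one implementation. For illustration only (the kernel does this in assembly, not in C), the same effect can be had with GCC's alias attribute; the names below are hypothetical:

/* Illustration only: GCC symbol aliases in C, equivalent in effect to the
 * "memset = ___memset" lines in the assembly above.  Names are made up. */
#include <stdio.h>
#include <stddef.h>

void *demo_memset_impl(void *s, int c, size_t n)	/* plays the role of ___memset */
{
	unsigned char *p = s;

	while (n--)
		*p++ = (unsigned char)c;
	return s;
}

/* Two more names for the same code, like memset and __memset in the diff. */
void *demo_memset(void *, int, size_t) __attribute__((alias("demo_memset_impl")));
void *demo__memset(void *, int, size_t) __attribute__((alias("demo_memset_impl")));

int main(void)
{
	char buf[8];

	demo__memset(buf, 'x', sizeof(buf) - 1);
	buf[7] = '\0';
	puts(buf);	/* prints "xxxxxxx" */
	return 0;
}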