From 2fb704d57a3f6db8376ee12f8c9ad858d04c91cb Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 2 May 2018 13:13:22 -0700 Subject: MIPS: Initial nanoMIPS support This patch introduces initial support for the new nanoMIPS ISA, the associated P32 ABI and the MIPS I7200 CPU. nanoMIPS has a variable-length compressed instruction set that is completely standalone from the other MIPS ISAs. It is designed to compress the highest frequency instructions to 16-bits, and use 48-bit instructions to efficiently encode 32-bit constants into the instruction stream. There is also a wider range of 32-bit instructions, which merge carefully chosen high frequency instruction sequences into single operations creating more flexible addressing modes such as indexed and scaled indexed addressing, branch compare with immediate and macro style instructions. The macro-like instructions compress prologue and epilogue sequences, as well as a small number of high frequency instruction pairs like two move instructions or a move and function call. nanoMIPS also totally eliminates branch delay slots, which follows a precedent set by microMIPS R6. Due to the binary incompatibility between previous MIPS architecture generations and nanoMIPS, and the significantly revamped compiler ABI, where for the first time, a single Linux kernel would not be expected to handle both old and new ABIs, we have decided to also take the opportunity to modernise the Linux user ABI for nanoMIPS, making as much use of generic interfaces as possible and modernising the true architecture specific parts. This is similar to what a whole new kernel architecture would be expected to adopt, but has been done within the existing MIPS architecture port to allow reuse of the existing MIPS code, most of which does not depend on these ABI specifics. 
Details of the proposed Linux user ABI changes for nanoMIPS can be found here: https://www.linux-mips.org/wiki/P32_Linux_ABI nanoMIPS pre-built toolchains and source code tarballs are available at: http://codescape.mips.com/components/toolchain/nanomips/2018.04-02/ This single patch is being released as the most expedient path to releasing this work into the wild, but is of course not intended to be submitted or merged upstream. This work will be prepared & submitted as series of smaller patches in the coming months. Co-authored-by: James Hogan Co-authored-by: Marcin Nowakowski Co-authored-by: Matt Redfearn Signed-off-by: Paul Burton --- .../devicetree/bindings/misc/mti,mips-cpc.txt | 8 + arch/alpha/include/asm/unistd.h | 1 + arch/arc/include/uapi/asm/unistd.h | 2 + arch/arm/include/asm/unistd.h | 1 + arch/arm64/include/uapi/asm/unistd.h | 2 + arch/blackfin/include/asm/unistd.h | 1 + arch/c6x/include/uapi/asm/unistd.h | 2 + arch/cris/include/asm/unistd.h | 1 + arch/frv/include/asm/unistd.h | 1 + arch/h8300/include/uapi/asm/unistd.h | 2 + arch/hexagon/include/uapi/asm/unistd.h | 2 + arch/ia64/include/asm/unistd.h | 2 + arch/m32r/include/asm/unistd.h | 1 + arch/m68k/include/asm/unistd.h | 1 + arch/metag/include/uapi/asm/unistd.h | 2 + arch/microblaze/include/asm/unistd.h | 1 + arch/mips/Kconfig | 85 +++- arch/mips/Kconfig.debug | 14 + arch/mips/Makefile | 23 +- arch/mips/boot/dts/img/boston.dts | 2 + arch/mips/cavium-octeon/octeon-memcpy.S | 211 ++++---- arch/mips/configs/generic/nano32r6.config | 2 + arch/mips/include/asm/Kbuild | 1 + arch/mips/include/asm/abi.h | 3 + arch/mips/include/asm/asm.h | 24 +- arch/mips/include/asm/asmmacro.h | 34 +- arch/mips/include/asm/atomic.h | 36 +- arch/mips/include/asm/barrier.h | 14 +- arch/mips/include/asm/bitops.h | 25 +- arch/mips/include/asm/branch.h | 14 +- arch/mips/include/asm/bug.h | 2 +- arch/mips/include/asm/cmpxchg.h | 32 +- arch/mips/include/asm/compiler.h | 7 +- arch/mips/include/asm/cpu-features.h | 69 ++- 
arch/mips/include/asm/cpu-type.h | 7 +- arch/mips/include/asm/cpu.h | 6 +- arch/mips/include/asm/dsemul.h | 34 ++ arch/mips/include/asm/elf.h | 28 +- arch/mips/include/asm/fpregdef.h | 11 +- arch/mips/include/asm/fpu.h | 3 + arch/mips/include/asm/fpu_emulator.h | 16 + arch/mips/include/asm/futex.h | 43 +- arch/mips/include/asm/hazards.h | 18 +- arch/mips/include/asm/io.h | 10 +- arch/mips/include/asm/irqflags.h | 3 +- arch/mips/include/asm/local.h | 6 +- arch/mips/include/asm/mips-cm.h | 4 + arch/mips/include/asm/mips_mt.h | 6 + arch/mips/include/asm/mipsmtregs.h | 161 ++++++- arch/mips/include/asm/mipsregs.h | 174 +++++-- arch/mips/include/asm/module.h | 2 + arch/mips/include/asm/page.h | 15 +- arch/mips/include/asm/pgtable.h | 14 +- arch/mips/include/asm/ptrace.h | 10 +- arch/mips/include/asm/r4kcache.h | 170 +++---- arch/mips/include/asm/regdef.h | 96 ++-- arch/mips/include/asm/signal.h | 6 + arch/mips/include/asm/sim.h | 52 +- arch/mips/include/asm/stackframe.h | 62 ++- arch/mips/include/asm/stacktrace.h | 28 +- arch/mips/include/asm/string.h | 2 +- arch/mips/include/asm/switch_to.h | 2 + arch/mips/include/asm/syscall.h | 20 +- arch/mips/include/asm/thread_info.h | 5 +- arch/mips/include/asm/uaccess.h | 256 ++-------- arch/mips/include/asm/unistd.h | 17 +- arch/mips/include/asm/uprobes.h | 45 +- arch/mips/include/uapi/asm/break.h | 4 +- arch/mips/include/uapi/asm/errno.h | 18 + arch/mips/include/uapi/asm/fcntl.h | 6 + arch/mips/include/uapi/asm/ptrace.h | 3 +- arch/mips/include/uapi/asm/resource.h | 7 + arch/mips/include/uapi/asm/sgidefs.h | 1 + arch/mips/include/uapi/asm/sigcontext.h | 17 + arch/mips/include/uapi/asm/siginfo.h | 26 +- arch/mips/include/uapi/asm/signal.h | 12 + arch/mips/include/uapi/asm/stat.h | 11 +- arch/mips/include/uapi/asm/statfs.h | 13 + arch/mips/include/uapi/asm/ucontext.h | 93 ++++ arch/mips/include/uapi/asm/unistd.h | 22 +- arch/mips/kernel/Makefile | 13 +- arch/mips/kernel/asm-offsets.c | 37 +- arch/mips/kernel/branch.c | 16 + 
arch/mips/kernel/cps-vec.S | 241 +++++++--- arch/mips/kernel/cpu-probe.c | 35 +- arch/mips/kernel/csrc-r4k.c | 2 +- arch/mips/kernel/elf.c | 6 +- arch/mips/kernel/genex.S | 66 ++- arch/mips/kernel/idle.c | 21 +- arch/mips/kernel/mips-mt.c | 47 ++ arch/mips/kernel/perf_event.c | 3 +- arch/mips/kernel/perf_event_mipsxx.c | 6 + arch/mips/kernel/pm-cps.c | 18 + arch/mips/kernel/proc.c | 2 + arch/mips/kernel/process.c | 53 +- arch/mips/kernel/ptrace.c | 40 +- arch/mips/kernel/r4k_fpu.S | 23 +- arch/mips/kernel/scall32-p32.S | 88 ++++ arch/mips/kernel/signal.c | 124 ++++- arch/mips/kernel/signal_n32.c | 10 +- arch/mips/kernel/signal_o32.c | 20 +- arch/mips/kernel/smp-cps.c | 7 +- arch/mips/kernel/spram.c | 1 - arch/mips/kernel/stacktrace.c | 4 +- arch/mips/kernel/syscall-nanomips.c | 45 ++ arch/mips/kernel/traps.c | 148 +++++- arch/mips/kernel/unaligned.c | 534 ++++++++++++++++----- arch/mips/kernel/uprobes.c | 76 ++- arch/mips/kernel/watch.c | 137 +++++- arch/mips/lib/Makefile | 13 +- arch/mips/lib/delay.c | 10 +- arch/mips/lib/memcpy.S | 310 +++++------- arch/mips/lib/memmove.c | 39 ++ arch/mips/lib/memset.S | 61 ++- arch/mips/lib/strncpy_user.S | 18 +- arch/mips/lib/strnlen_user.S | 28 +- arch/mips/lib/uncached.c | 4 +- arch/mips/math-emu/cp1emu.c | 2 + arch/mips/math-emu/dsemul.c | 2 + arch/mips/mm/Makefile | 9 +- arch/mips/mm/c-r4k.c | 9 +- arch/mips/mm/cex-gen.S | 1 - arch/mips/mm/sc-mips.c | 4 +- arch/mips/mm/tlb-funcs.S | 156 ++++++ arch/mips/mm/tlbex.c | 22 +- arch/mips/mm/uasm-micromips.c | 1 - arch/mips/mm/uasm-mips.c | 1 - arch/mips/oprofile/backtrace.c | 3 + arch/mips/vdso/Makefile | 8 +- arch/mips/vdso/genvdso.c | 12 +- arch/mips/vdso/genvdso.h | 7 +- arch/mips/vdso/gettimeofday.c | 2 +- arch/mips/vdso/sigreturn.S | 8 +- arch/mn10300/include/asm/unistd.h | 1 + arch/nios2/include/uapi/asm/unistd.h | 2 + arch/openrisc/include/uapi/asm/unistd.h | 2 + arch/parisc/include/asm/unistd.h | 1 + arch/powerpc/include/asm/unistd.h | 1 + arch/s390/include/asm/unistd.h | 
1 + arch/score/include/uapi/asm/unistd.h | 2 + arch/sh/include/asm/unistd.h | 1 + arch/sparc/include/asm/unistd.h | 1 + arch/tile/include/uapi/asm/unistd.h | 2 + arch/unicore32/include/uapi/asm/unistd.h | 2 + arch/x86/include/asm/unistd.h | 2 + arch/xtensa/include/asm/unistd.h | 1 + drivers/misc/Kconfig | 24 + drivers/misc/Makefile | 3 + drivers/misc/mips-i7200-spram.c | 378 +++++++++++++++ drivers/misc/mips-itc.c | 214 +++++++++ drivers/misc/mips-uncached-memory.c | 130 +++++ fs/stat.c | 4 + include/asm-generic/audit_dir_write.h | 4 +- include/asm-generic/unistd.h | 2 + include/linux/syscalls.h | 5 + include/linux/uprobes.h | 5 + include/uapi/asm-generic/stat.h | 7 + include/uapi/asm-generic/unistd.h | 15 + include/uapi/linux/elf-em.h | 1 + kernel/events/uprobes.c | 9 +- mm/fadvise.c | 14 + mm/mmap.c | 14 + net/ipv4/ipmr.c | 2 +- scripts/checksyscalls.sh | 11 + scripts/kallsyms.c | 12 +- scripts/mksysmap | 2 +- scripts/sortextable.c | 5 + 167 files changed, 4218 insertions(+), 1408 deletions(-) create mode 100644 Documentation/devicetree/bindings/misc/mti,mips-cpc.txt create mode 100644 arch/mips/configs/generic/nano32r6.config create mode 100644 arch/mips/kernel/scall32-p32.S create mode 100644 arch/mips/kernel/syscall-nanomips.c create mode 100644 arch/mips/lib/memmove.c create mode 100644 drivers/misc/mips-i7200-spram.c create mode 100644 drivers/misc/mips-itc.c create mode 100644 drivers/misc/mips-uncached-memory.c diff --git a/Documentation/devicetree/bindings/misc/mti,mips-cpc.txt b/Documentation/devicetree/bindings/misc/mti,mips-cpc.txt new file mode 100644 index 000000000000..c6b82511ae8a --- /dev/null +++ b/Documentation/devicetree/bindings/misc/mti,mips-cpc.txt @@ -0,0 +1,8 @@ +Binding for MIPS Cluster Power Controller (CPC). + +This binding allows a system to specify where the CPC registers are +located. + +Required properties: +compatible : Should be "mti,mips-cpc". +regs: Should describe the address & size of the CPC register region. 
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index d6e29a1de4cc..4db11aa8a113 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -8,6 +8,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 517178b1daef..caf31079132a 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -17,6 +17,8 @@ #define _UAPI_ASM_ARC_UNISTD_H #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 076090d2dbf5..68bc0b5e58a4 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -17,6 +17,7 @@ #include #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_GETPGRP diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 5072cbd15c82..b06fa046928f 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -16,5 +16,7 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT #include diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index c8c8ff9eff61..eb0e20e02006 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -9,6 +9,7 @@ #include #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git 
a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h index 0d2daf7f9809..cfe4ed2e0ce2 100644 --- a/arch/c6x/include/uapi/asm/unistd.h +++ b/arch/c6x/include/uapi/asm/unistd.h @@ -16,6 +16,8 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_CLONE /* Use the standard ABI for syscalls. */ diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h index 6a92c0505156..ba8e95272f5a 100644 --- a/arch/cris/include/asm/unistd.h +++ b/arch/cris/include/asm/unistd.h @@ -12,6 +12,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index b4b3f9b26b81..2bd364255b45 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h @@ -10,6 +10,7 @@ /* #define __ARCH_WANT_OLD_READDIR */ #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM /* #define __ARCH_WANT_SYS_GETHOSTNAME */ #define __ARCH_WANT_SYS_IPC diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h index 7dd20ef7625a..8109fb2856b3 100644 --- a/arch/h8300/include/uapi/asm/unistd.h +++ b/arch/h8300/include/uapi/asm/unistd.h @@ -1,5 +1,7 @@ #define __ARCH_NOMMU #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT #include diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h index ea181e79162e..b4daee85eb25 100644 --- a/arch/hexagon/include/uapi/asm/unistd.h +++ b/arch/hexagon/include/uapi/asm/unistd.h @@ -29,6 +29,8 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT 
#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index ffb705dc9c13..653b3c846cec 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -14,6 +14,8 @@ #define NR_syscalls 326 /* length of syscall table */ +#define __ARCH_WANT_SYSCALL_UNXSTAT + /* * The following defines stop scripts/checksyscalls.sh from complaining about * unimplemented system calls. Glibc provides for each of these by using diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index dee4c196972e..426a3ea6f694 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h @@ -8,6 +8,7 @@ #define NR_syscalls 326 #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 30d0d3fbd4ef..66a68b16eca8 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -10,6 +10,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h index 9f72c4cfcfb5..935f7542df7e 100644 --- a/arch/metag/include/uapi/asm/unistd.h +++ b/arch/metag/include/uapi/asm/unistd.h @@ -9,6 +9,8 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT /* Use the standard ABI for syscalls. 
*/ #include diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 9774e1d9507b..de46216d141e 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -16,6 +16,7 @@ /* #define __ARCH_WANT_OLD_READDIR */ /* #define __ARCH_WANT_OLD_STAT */ #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index dc37fbe0c9cc..b0334e8cb2d1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -36,7 +36,7 @@ config MIPS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT - select HAVE_ARCH_WITHIN_STACK_FRAMES if KALLSYMS + select HAVE_ARCH_WITHIN_STACK_FRAMES if KALLSYMS && !CPU_NANOMIPS select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS) select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) select HAVE_CC_STACKPROTECTOR @@ -110,6 +110,7 @@ config MIPS_GENERIC select SYS_HAS_CPU_MIPS64_R1 select SYS_HAS_CPU_MIPS64_R2 select SYS_HAS_CPU_MIPS64_R6 + select SYS_HAS_CPU_NANOMIPS32_R6 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN @@ -507,6 +508,7 @@ config MIPS_MALTA select SYS_HAS_CPU_MIPS64_R1 select SYS_HAS_CPU_MIPS64_R2 select SYS_HAS_CPU_MIPS64_R6 + select SYS_HAS_CPU_NANOMIPS32_R6 select SYS_HAS_CPU_NEVADA select SYS_HAS_CPU_RM7000 select SYS_SUPPORTS_32BIT_KERNEL @@ -1530,6 +1532,19 @@ config CPU_MIPS64_R6 family, are based on a MIPS64r6 processor. If you own an older processor, you probably need to select MIPS64r1 or MIPS64r2 instead. 
+config CPU_NANOMIPS32_R6 + bool "nanoMIPS32 Release 6" + depends on SYS_HAS_CPU_NANOMIPS32_R6 + select CPU_HAS_PREFETCH + select CPU_NANOMIPS + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select GENERIC_CSUM + select MIPS_O32_FP64_SUPPORT + help + Choose this option to build a kernel for release 6 or later of the + nanoMIPS32 architecture. + config CPU_R3000 bool "R3000" depends on SYS_HAS_CPU_R3000 @@ -1728,7 +1743,7 @@ endchoice config CPU_MIPS32_3_5_FEATURES bool "MIPS32 Release 3.5 Features" depends on SYS_HAS_CPU_MIPS32_R3_5 - depends on CPU_MIPS32_R2 || CPU_MIPS32_R6 + depends on CPU_MIPS32_R2 || CPU_MIPS32_R6 || CPU_NANOMIPS32_R6 help Choose this option to build a kernel for release 2 or later of the MIPS32 architecture including features from the 3.5 release such as @@ -1900,6 +1915,9 @@ config SYS_HAS_CPU_MIPS64_R2 config SYS_HAS_CPU_MIPS64_R6 bool +config SYS_HAS_CPU_NANOMIPS32_R6 + bool + config SYS_HAS_CPU_R3000 bool @@ -1990,7 +2008,8 @@ endmenu # config CPU_MIPS32 bool - default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6 + default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6 || \ + CPU_NANOMIPS32_R6 config CPU_MIPS64 bool @@ -2011,7 +2030,7 @@ config CPU_MIPSR2 config CPU_MIPSR6 bool - default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 + default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 || CPU_NANOMIPS32_R6 select CPU_HAS_RIXI select HAVE_ARCH_BITREVERSE select MIPS_ASID_BITS_VARIABLE @@ -2065,7 +2084,7 @@ choice config 32BIT bool "32-bit kernel" depends on CPU_SUPPORTS_32BIT_KERNEL && SYS_SUPPORTS_32BIT_KERNEL - select TRAD_SIGNALS + select TRAD_SIGNALS if !CPU_NANOMIPS help Select this option if you want to build a 32-bit kernel. 
@@ -2220,9 +2239,30 @@ config CPU_GENERIC_DUMP_TLB bool default y if !(CPU_R3000 || CPU_R8000 || CPU_TX39XX) +config FP_SUPPORT + bool "Floating Point Support" + depends on !CPU_NANOMIPS + default y + help + Select this to enable support for programs which make use of floating + point instructions. This allows the kernel to support initialising, context + switching & emulating the Floating Point Unit (FPU) in order to allow such + programs to execute correctly. + + If you disable this then any program which attempts to execute a floating + point instruction will receive a SIGILL signal & is likely to fail. + + If in doubt, say Y. + +config CPU_R2300_FPU + bool + depends on FP_SUPPORT + default y if CPU_R3000 || CPU_TX39XX + config CPU_R4K_FPU bool - default y if !(CPU_R3000 || CPU_TX39XX) + depends on FP_SUPPORT + default y if !CPU_R2300_FPU config CPU_R4K_CACHE_TLB bool @@ -2231,7 +2271,7 @@ config CPU_R4K_CACHE_TLB config MIPS_MT_SMP bool "MIPS MT SMP support (1 TC on each available VPE)" default y - depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MICROMIPS select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select SYNC_R4K @@ -2273,7 +2313,7 @@ config MIPS_MT_FPAFF config MIPSR2_TO_R6_EMULATOR bool "MIPS R2-to-R6 emulator" - depends on CPU_MIPSR6 + depends on CPU_MIPSR6 && FP_SUPPORT default y help Choose this option if you want to run non-R6 MIPS userland code. @@ -2394,16 +2434,19 @@ config SB1_PASS_2_1_WORKAROUNDS config ARCH_PHYS_ADDR_T_64BIT bool -choice - prompt "SmartMIPS or microMIPS ASE support" - -config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS - bool "None" +config CPU_MICROMIPS + depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6 + bool "Compile targeting microMIPS" help - Select this if you want neither microMIPS nor SmartMIPS support + When this option is enabled the kernel will be built using the + microMIPS ISA. 
+ +config CPU_NANOMIPS + bool + select ARCH_WANT_FRAME_POINTERS config CPU_HAS_SMARTMIPS - depends on SYS_SUPPORTS_SMARTMIPS + depends on SYS_SUPPORTS_SMARTMIPS && !CPU_MICROMIPS && !CPU_NANOMIPS bool "SmartMIPS" help SmartMIPS is a extension of the MIPS32 architecture aimed at @@ -2414,19 +2457,11 @@ config CPU_HAS_SMARTMIPS you don't know you probably don't have SmartMIPS and should say N here. -config CPU_MICROMIPS - depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6 - bool "microMIPS" - help - When this option is enabled the kernel will be built using the - microMIPS ISA - -endchoice - config CPU_HAS_MSA bool "Support for the MIPS SIMD Architecture" - depends on CPU_SUPPORTS_MSA + depends on CPU_SUPPORTS_MSA && FP_SUPPORT depends on 64BIT || MIPS_O32_FP64_SUPPORT + depends on !CPU_NANOMIPS help MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers and a set of SIMD instructions to operate on them. When this option diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 57e5a40275fc..14fdc59f8f49 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -179,4 +179,18 @@ config MIPS_CP0_DEBUGFS If unsure, say N. +config MIPS_MT_RAND_SCHED_POLICY + bool "Support randomized MT scheduling policy" + depends on MIPS_MT_SMP + default y if CPU_NANOMIPS + help + Enable this to support randomizing the MT ASE scheduling policy when + the user provides an "mt_random_policy" parameter on the kernel + command line. + + This is useful for debugging CPUs which implement multiple scheduling + policies. + + If in doubt, say N. 
+ endmenu diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 7e014d4a8f63..662030160da3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -45,6 +45,10 @@ tool-archpref = $(64bit-tool-archpref) UTS_MACHINE := mips64 endif +ifdef CONFIG_CPU_NANOMIPS +LD = $(CROSS_COMPILE)ld.gold +endif + ifneq ($(SUBARCH),$(ARCH)) ifeq ($(CROSS_COMPILE),) CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-) @@ -65,7 +69,9 @@ ld-emul = $(32bit-emul) vmlinux-32 = vmlinux vmlinux-64 = vmlinux.64 -cflags-y += -mabi=32 +abi-y := -mabi=32 +abi-$(CONFIG_CPU_NANOMIPS) := -m32 +cflags-y += $(abi-y) endif ifdef CONFIG_64BIT @@ -90,9 +96,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz # machines may also. Since BFD is incredibly buggy with respect to # crossformat linking we rely on the elf2ecoff tool for format conversion. # -cflags-y += -G 0 -mno-abicalls -fno-pic -pipe +abicalls-y := -mno-abicalls +abicalls-$(CONFIG_CPU_NANOMIPS) := +cflags-y += -G 0 $(abicalls-y) -fno-pic -pipe cflags-y += -msoft-float -LDFLAGS_vmlinux += -G 0 -static -n -nostdlib +ifndef CONFIG_CPU_NANOMIPS +LDFLAGS_vmlinux += -G 0 +endif +LDFLAGS_vmlinux += -static -n -nostdlib KBUILD_AFLAGS_MODULE += -mlong-calls KBUILD_CFLAGS_MODULE += -mlong-calls @@ -165,6 +176,7 @@ cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ -Wa,-mips64r2 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap +cflags-$(CONFIG_CPU_NANOMIPS32_R6) += -march=32r6 -Wa,--trap cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ -Wa,--trap @@ -335,7 +347,7 @@ OBJCOPYFLAGS += --remove-section=.reginfo head-y := arch/mips/kernel/head.o libs-y += arch/mips/lib/ -libs-y += arch/mips/math-emu/ +libs-$(CONFIG_FP_SUPPORT) 
+= arch/mips/math-emu/ # See arch/mips/Kbuild for content of core part of the kernel core-y += arch/mips/ @@ -482,7 +494,7 @@ define archhelp echo ' If you are targeting a system supported by generic kernels you may' echo ' configure the kernel for a given architecture target like so:' echo - echo ' {micro32,32,64}{r1,r2,r6}{el,}_defconfig ' + echo ' {nano32,micro32,32,64}{r1,r2,r6}{el,}_defconfig ' echo echo ' Otherwise, the following default configurations are available:' endef @@ -517,6 +529,7 @@ endef $(eval $(call gen_generic_defconfigs,32 64,r1 r2 r6,eb el)) $(eval $(call gen_generic_defconfigs,micro32,r2,eb el)) +$(eval $(call gen_generic_defconfigs,nano32,r6,eb el)) .PHONY: $(generic_defconfigs) $(generic_defconfigs): diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 2cd49b60e030..09c128cb84e5 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -37,6 +37,7 @@ }; pci0: pci@10000000 { + status = "disabled"; compatible = "xlnx,axi-pcie-host-1.00.a"; device_type = "pci"; reg = <0x10000000 0x2000000>; @@ -65,6 +66,7 @@ }; pci1: pci@12000000 { + status = "disabled"; compatible = "xlnx,axi-pcie-host-1.00.a"; device_type = "pci"; reg = <0x12000000 0x2000000>; diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 1621995c76b5..cae4bdd3ea55 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Unified implementation of memcpy, memmove and the __copy_user backend. + * Unified implementation of memcpy and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. 
@@ -18,6 +18,9 @@ #include #include +#define MEMCPY_MODE 1 +#define USER_COPY_MODE 2 + #define dst a0 #define src a1 #define len a2 @@ -40,8 +43,6 @@ * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) - * __copy_user uses a non-standard calling convention; see - * arch/mips/include/asm/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent @@ -54,28 +55,27 @@ /* * The exception handler for loads requires that: - * 1- AT contain the address of the byte just past the end of the source + * 1- a3 contain the address of the byte just past the end of the source * of the copy, - * 2- src_entry <= src < AT, and + * 2- src_entry <= src < a3, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * - * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user + * (1) is set up up by uaccess.h and maintained by not writing a3 in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. - * - * For __rmemcpy and memmove an exception is always a kernel bug, therefore - * they're not protected. */ #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ - .section __ex_table,"a"; \ - PTR 9b, handler; \ - .previous + .if \mode != MEMCPY_MODE; \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous; \ + .endif /* * Only on the 64-bit kernel we can made use of 64-bit registers. @@ -97,24 +97,6 @@ #define NBYTES 8 #define LOG_NBYTES 3 -/* - * As we are sharing code base with the mips32 tree (which use the o32 ABI - * register definitions). 
We need to redefine the register definitions from - * the n64 ABI register naming to the o32 ABI register naming. - */ -#undef t0 -#undef t1 -#undef t2 -#undef t3 -#define t0 $8 -#define t1 $9 -#define t2 $10 -#define t3 $11 -#define t4 $12 -#define t5 $13 -#define t6 $14 -#define t7 $15 - #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL @@ -137,21 +119,8 @@ .text .set noreorder - .set noat -/* - * A combined memcpy/__copy_user - * __copy_user sets len to 0 for success; else to an upper bound of - * the number of uncopied bytes. - * memcpy sets v0 to dst. - */ - .align 5 -LEAF(memcpy) /* a0=dst a1=src a2=len */ -EXPORT_SYMBOL(memcpy) - move v0, dst /* return value */ -__memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) + .macro __BUILD_COPY_USER mode, uncopied /* * Note: dst & src may be unaligned, len may be 0 * Temps @@ -162,15 +131,15 @@ EXPORT_SYMBOL(__copy_user) # pref 0, 0(src) sltu t0, len, NBYTES # Check if < 1 word - bnez t0, copy_bytes_checklen + bnez t0, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK # Check if src unaligned - bnez t0, src_unaligned + bnez t0, .Lsrc_unaligned\@ sltu t0, len, 4*NBYTES # Check if < 4 words - bnez t0, less_than_4units + bnez t0, .Lless_than_4units\@ sltu t0, len, 8*NBYTES # Check if < 8 words - bnez t0, less_than_8units + bnez t0, .Lless_than_8units\@ sltu t0, len, 16*NBYTES # Check if < 16 words - bnez t0, cleanup_both_aligned + bnez t0, .Lcleanup_both_aligned\@ sltu t0, len, 128+1 # Check if len < 129 bnez t0, 1f # Skip prefetch if len is too short sltu t0, len, 256+1 # Check if len < 257 @@ -225,10 +194,10 @@ EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u) # # Jump here if there are less than 16*NBYTES left. 
# -cleanup_both_aligned: - beqz len, done +.Lcleanup_both_aligned\@: + beqz len, .Ldone\@ sltu t0, len, 8*NBYTES - bnez t0, less_than_8units + bnez t0, .Lless_than_8units\@ nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) @@ -248,14 +217,14 @@ EXC( STORE t1, UNIT(5)(dst), s_exc_p3u) EXC( STORE t2, UNIT(6)(dst), s_exc_p2u) EXC( STORE t3, UNIT(7)(dst), s_exc_p1u) ADD src, src, 8*NBYTES - beqz len, done + beqz len, .Ldone\@ ADD dst, dst, 8*NBYTES # # Jump here if there are less than 8*NBYTES left. # -less_than_8units: +.Lless_than_8units\@: sltu t0, len, 4*NBYTES - bnez t0, less_than_4units + bnez t0, .Lless_than_4units\@ nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) @@ -267,15 +236,15 @@ EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) ADD src, src, 4*NBYTES - beqz len, done + beqz len, .Ldone\@ ADD dst, dst, 4*NBYTES # # Jump here if there are less than 4*NBYTES left. This means # we may need to copy up to 3 NBYTES words. 
# -less_than_4units: +.Lless_than_4units\@: sltu t0, len, 1*NBYTES - bnez t0, copy_bytes_checklen + bnez t0, .Lcopy_bytes_checklen\@ nop # # 1) Copy NBYTES, then check length again @@ -285,7 +254,7 @@ EXC( LOAD t0, 0(src), l_exc) sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES - bnez t1, copy_bytes_checklen + bnez t1, .Lcopy_bytes_checklen\@ ADD dst, dst, NBYTES # # 2) Copy NBYTES, then check length again @@ -295,7 +264,7 @@ EXC( LOAD t0, 0(src), l_exc) sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES - bnez t1, copy_bytes_checklen + bnez t1, .Lcopy_bytes_checklen\@ ADD dst, dst, NBYTES # # 3) Copy NBYTES, then check length again @@ -304,13 +273,13 @@ EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES ADD src, src, NBYTES ADD dst, dst, NBYTES - b copy_bytes_checklen + b .Lcopy_bytes_checklen\@ EXC( STORE t0, -8(dst), s_exc_p1u) -src_unaligned: +.Lsrc_unaligned\@: #define rem t8 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter - beqz t0, cleanup_src_unaligned + beqz t0, .Lcleanup_src_unaligned\@ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 1: /* @@ -336,10 +305,10 @@ EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) bne len, rem, 1b ADD dst, dst, 4*NBYTES -cleanup_src_unaligned: - beqz len, done +.Lcleanup_src_unaligned\@: + beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES - beq rem, len, copy_bytes + beq rem, len, .Lcopy_bytes\@ nop 1: EXC( LDFIRST t0, FIRST(0)(src), l_exc) @@ -350,15 +319,15 @@ EXC( STORE t0, 0(dst), s_exc_p1u) bne len, rem, 1b ADD dst, dst, NBYTES -copy_bytes_checklen: - beqz len, done +.Lcopy_bytes_checklen\@: + beqz len, .Ldone\@ nop -copy_bytes: +.Lcopy_bytes\@: /* 0 < len < NBYTES */ #define COPY_BYTE(N) \ EXC( lb t0, N(src), l_exc); \ SUB len, len, 1; \ - beqz len, done; \ + beqz len, .Ldone\@; \ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(0) @@ -368,13 +337,15 @@ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(4) COPY_BYTE(5) EXC( lb t0, NBYTES-2(src), l_exc) - SUB len, len, 1 + SUB \uncopied, len, 1 
jr ra EXC( sb t0, NBYTES-2(dst), s_exc_p1) -done: +.Ldone\@: jr ra - nop - END(memcpy) + move \uncopied, len + + /* memcpy shouldn't generate exceptions */ + .if \mode != MEMCPY_MODE l_exc_copy_rewind16: /* Rewind src and dst by 16*NBYTES for l_exc_copy */ @@ -400,15 +371,15 @@ EXC( lb t1, 0(src), l_exc) ADD dst, dst, 1 l_exc: LOAD t0, THREAD_BUADDR($28) # t0 is just past last good address - SUB len, AT, t0 # len number of uncopied bytes + SUB len, a3, t0 # len number of uncopied bytes jr ra - nop + move \uncopied, len #define SEXC(n) \ s_exc_p ## n ## u: \ jr ra; \ - ADD len, len, n*NBYTES + ADD \uncopied, len, n*NBYTES SEXC(16) SEXC(15) @@ -429,52 +400,52 @@ SEXC(1) s_exc_p1: jr ra - ADD len, len, 1 + ADD \uncopied, len, 1 s_exc: jr ra - nop + move \uncopied, len + .endif /* \mode != MEMCPY_MODE */ + .endm +/* + * memcpy() - Copy memory + * @a0 - destination + * @a1 - source + * @a2 - length + * + * Copy @a2 bytes of memory from @a1 to @a0. + * + * Returns: the destination pointer + */ .align 5 -LEAF(memmove) -EXPORT_SYMBOL(memmove) - ADD t0, a0, a2 - ADD t1, a1, a2 - sltu t0, a1, t0 # dst + len <= src -> memcpy - sltu t1, a0, t1 # dst >= src + len -> memcpy - and t0, t1 - beqz t0, __memcpy - move v0, a0 /* return value */ - beqz a2, r_out - END(memmove) - - /* fall through to __rmemcpy */ -LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ - sltu t0, a1, a0 - beqz t0, r_end_bytes_up # src >= dst - nop - ADD a0, a2 # dst = dst + len - ADD a1, a2 # src = src + len - -r_end_bytes: - lb t0, -1(a1) - SUB a2, a2, 0x1 - sb t0, -1(a0) - SUB a1, a1, 0x1 - bnez a2, r_end_bytes - SUB a0, a0, 0x1 - -r_out: - jr ra - move a2, zero +LEAF(memcpy) /* a0=dst a1=src a2=len */ +EXPORT_SYMBOL(memcpy) + move v0, dst /* return value */ + __BUILD_COPY_USER MEMCPY_MODE len + END(memcpy) -r_end_bytes_up: - lb t0, (a1) - SUB a2, a2, 0x1 - sb t0, (a0) - ADD a1, a1, 0x1 - bnez a2, r_end_bytes_up - ADD a0, a0, 0x1 +/* + * __copy_user() - Copy memory + * @a0 - destination + * @a1 - source + * @a2 - 
length + * + * Copy @a2 bytes of memory from @a1 to @a0. + * + * Returns: the number of uncopied bytes in @a2 + */ +LEAF(__copy_user) +EXPORT_SYMBOL(__copy_user) + li ta0, 0 /* not inatomic */ +__copy_user_common: + __BUILD_COPY_USER COPY_USER_MODE v0 + END(__copy_user) - jr ra - move a2, zero - END(__rmemcpy) +/* + * ta0 is used as a flag to note inatomic mode. + */ +LEAF(__copy_user_inatomic) +EXPORT_SYMBOL(__copy_user_inatomic) + b __copy_user_common + li ta0, 1 + END(__copy_user_inatomic) diff --git a/arch/mips/configs/generic/nano32r6.config b/arch/mips/configs/generic/nano32r6.config new file mode 100644 index 000000000000..a0378ef30296 --- /dev/null +++ b/arch/mips/configs/generic/nano32r6.config @@ -0,0 +1,2 @@ +CONFIG_CPU_NANOMIPS32_R6=y +CONFIG_CPU_NANOMIPS=y diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index c75c3d2cc6d8..1955a3bd57e4 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += qrwlock.h generic-y += qspinlock.h generic-y += sections.h generic-y += segment.h +generic-y += syscalls.h generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index dba7f4b6bebf..d821f9306dbb 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h @@ -11,6 +11,7 @@ #include +#include #include #include #include @@ -22,8 +23,10 @@ struct mips_abi { struct pt_regs *regs, sigset_t *set); const unsigned long restart; +#ifndef __MIPS_REDUCED_SIGCONTEXT unsigned off_sc_fpregs; unsigned off_sc_fpc_csr; +#endif unsigned off_sc_used_math; struct mips_vdso_image *vdso; diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 81fae23ce7cd..776df94b5421 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -126,7 +126,7 @@ symbol = value #define TEXT(msg) \ .pushsection .data; \ -8: .asciiz msg; \ +8: .asciz msg; \ .popsection; /* @@ -137,7 +137,7 @@ 
symbol = value .word 1f; \ .popsection \ .pushsection .data; \ -1: .asciiz string; \ +1: .asciz string; \ .popsection /* @@ -151,20 +151,20 @@ symbol = value #define PREF(hint,addr) \ .set push; \ - .set arch=r5000; \ + .set MIPS_ISA_LEVEL_RAW; \ pref hint, addr; \ .set pop #define PREFE(hint, addr) \ .set push; \ - .set mips0; \ + .set MIPS_ISA_LEVEL_RAW; \ .set eva; \ prefe hint, addr; \ .set pop #define PREFX(hint,addr) \ .set push; \ - .set arch=r5000; \ + .set MIPS_ISA_LEVEL_RAW; \ prefx hint, addr; \ .set pop @@ -226,7 +226,9 @@ symbol = value #define ALSZ 7 #define ALMASK ~7 #endif -#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) +#if (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) || \ + (_MIPS_SIM == _MIPS_SIM_PABI32) #define ALSZ 15 #define ALMASK ~15 #endif @@ -248,7 +250,7 @@ symbol = value * Use the following macros in assemblercode to load/store registers, * pointers etc. */ -#if (_MIPS_SIM == _MIPS_SIM_ABI32) +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || (_MIPS_SIM == _MIPS_SIM_PABI32) #define REG_S sw #define REG_L lw #define REG_SUBU subu @@ -272,6 +274,7 @@ symbol = value #define INT_SUB sub #define INT_SUBU subu #define INT_L lw +#define INT_LXS lwxs #define INT_S sw #define INT_SLL sll #define INT_SLLV sllv @@ -289,6 +292,7 @@ symbol = value #define INT_SUB dsub #define INT_SUBU dsubu #define INT_L ld +#define INT_LXS ldxs #define INT_S sd #define INT_SLL dsll #define INT_SLLV dsllv @@ -309,6 +313,7 @@ symbol = value #define LONG_SUB sub #define LONG_SUBU subu #define LONG_L lw +#define LONG_LXS lwxs #define LONG_S sw #define LONG_SP swp #define LONG_SLL sll @@ -332,6 +337,7 @@ symbol = value #define LONG_SUB dsub #define LONG_SUBU dsubu #define LONG_L ld +#define LONG_LXS ldxs #define LONG_S sd #define LONG_SP sdp #define LONG_SLL dsll @@ -358,6 +364,7 @@ symbol = value #define PTR_SUB sub #define PTR_SUBU subu #define PTR_L lw +#define PTR_LXS lwxs #define PTR_S sw #define PTR_LA la #define PTR_LI li @@ 
-383,6 +390,7 @@ symbol = value #define PTR_SUB dsub #define PTR_SUBU dsubu #define PTR_L ld +#define PTR_LXS ldxs #define PTR_S sd #define PTR_LA dla #define PTR_LI dli @@ -403,7 +411,7 @@ symbol = value /* * Some cp0 registers were extended to 64bit for MIPS III. */ -#if (_MIPS_SIM == _MIPS_SIM_ABI32) +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || (_MIPS_SIM == _MIPS_SIM_PABI32) #define MFC0 mfc0 #define MTC0 mtc0 #endif diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index feb069cbf44e..4920f387b5fc 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -107,7 +107,7 @@ .macro fpu_save_16odd thread .set push - .set mips64r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 SET_HARDFLOAT sdc1 $f1, THREAD_FPR1(\thread) @@ -166,7 +166,7 @@ .macro fpu_restore_16odd thread .set push - .set mips64r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 SET_HARDFLOAT ldc1 $f1, THREAD_FPR1(\thread) @@ -211,6 +211,7 @@ .endm #endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ +#if !defined(__mips_isa_rev) || (__mips_isa_rev < 2) /* * Temporary until all gas have MT ASE support */ @@ -237,11 +238,12 @@ .macro MTTR rt=0, rd=0, u=0, sel=0 .word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel) .endm +#endif #ifdef TOOLCHAIN_SUPPORTS_MSA .macro _cfcmsa rd, cs .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa cfcmsa \rd, $\cs @@ -250,7 +252,7 @@ .macro _ctcmsa cd, rs .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa ctcmsa $\cd, \rs @@ -259,7 +261,7 @@ .macro ld_b wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa ld.b $w\wd, \off(\base) @@ -268,7 +270,7 @@ .macro ld_h wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa ld.h $w\wd, \off(\base) @@ -277,7 +279,7 @@ .macro ld_w wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa ld.w $w\wd, \off(\base) @@ -286,7 +288,7 @@ .macro ld_d wd, off, 
base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa ld.d $w\wd, \off(\base) @@ -295,7 +297,7 @@ .macro st_b wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa st.b $w\wd, \off(\base) @@ -304,7 +306,7 @@ .macro st_h wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa st.h $w\wd, \off(\base) @@ -313,7 +315,7 @@ .macro st_w wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa st.w $w\wd, \off(\base) @@ -322,7 +324,7 @@ .macro st_d wd, off, base .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa st.d $w\wd, \off(\base) @@ -331,7 +333,7 @@ .macro copy_s_w ws, n .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa copy_s.w $1, $w\ws[\n] @@ -340,7 +342,7 @@ .macro copy_s_d ws, n .set push - .set mips64r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa copy_s.d $1, $w\ws[\n] @@ -349,7 +351,7 @@ .macro insert_w wd, n .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa insert.w $w\wd[\n], $1 @@ -358,7 +360,7 @@ .macro insert_d wd, n .set push - .set mips64r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 .set msa insert.d $w\wd[\n], $1 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 0ab176bdb8e8..820a18787aec 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -61,11 +61,12 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " ll %0, %1 # atomic_" #op "\n" \ " " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ @@ -102,11 +103,12 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " ll %1, %2 # atomic_" #op 
"_return \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ @@ -150,11 +152,12 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " ll %1, %2 # atomic_fetch_" #op " \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ @@ -241,20 +244,21 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) int temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_LEVEL" \n" "1: ll %1, %2 # atomic_sub_if_positive\n" " subu %0, %1, %3 \n" " bltz %0, 1f \n" " sc %0, %2 \n" - " .set noreorder \n" + " subu %1, %1, %3 \n" " beqz %0, 1b \n" - " subu %0, %1, %3 \n" - " .set reorder \n" "1: \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); + + result = temp; } else { unsigned long flags; @@ -403,11 +407,12 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " lld %0, %1 # atomic64_" #op "\n" \ " " #asm_op " %0, %2 \n" \ " scd %0, %1 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ @@ -444,11 +449,12 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " lld %1, %2 # atomic64_" #op "_return\n" \ " " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "=" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ @@ -493,11 +499,12 @@ static 
__inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ \ do { \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " lld %1, %2 # atomic64_fetch_" #op "\n" \ " " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ : "=&r" (result), "=&r" (temp), \ "=" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ @@ -586,20 +593,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_LEVEL" \n" "1: lld %1, %2 # atomic64_sub_if_positive\n" " dsubu %0, %1, %3 \n" " bltz %0, 1f \n" " scd %0, %2 \n" - " .set noreorder \n" + " dsubu %1, %1, %3 \n" " beqz %0, 1b \n" - " dsubu %0, %1, %3 \n" - " .set reorder \n" "1: \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); + + result = temp; } else { unsigned long flags; diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 071736cb5061..11fa7669ec41 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -9,6 +9,7 @@ #define __ASM_BARRIER_H #include +#include /* * Sync types defined by the MIPS architecture (document MD00087 table 6.5) @@ -111,7 +112,7 @@ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ - ".set mips2\n\t" \ + ".set " MIPS_ISA_LEVEL "\n\t" \ "sync\n\t" \ ".set pop" \ : /* no output */ \ @@ -125,7 +126,7 @@ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ - "lw $0,%0\n\t" \ + "lw $zero,%0\n\t" \ "nop\n\t" \ ".set pop" \ : /* no output */ \ @@ -148,9 +149,9 @@ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ - "lw $0,%0\n\t" \ + "lw $zero,%0\n\t" \ "sync\n\t" \ - "lw $0,%0\n\t" \ + "lw $zero,%0\n\t" \ ".set pop" \ : /* no output */ \ : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \ @@ -251,9 +252,10 @@ static inline void ehb(void) { __asm__ __volatile__( - " .set 
mips32r2 \n" + " .set push \n" + " .set " MIPS_ISA_LEVEL " \n" " ehb \n" - " .set mips0 \n"); + " .set pop \n"); } #include diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index da1b8718861e..b9738ba0bfc1 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -80,11 +80,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } else if (kernel_uses_llsc) { do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # set_bit \n" " or %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); @@ -123,7 +124,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) do { __asm__ __volatile__( " " __LL "%0, %1 # clear_bit \n" - " " __INS "%0, $0, %2, 1 \n" + " " __INS "%0, $zero, %2, 1 \n" " " __SC "%0, %1 \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit)); @@ -132,11 +133,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } else if (kernel_uses_llsc) { do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # clear_bit \n" " and %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); @@ -190,11 +192,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # change_bit \n" " xor %0, %2 \n" " " __SC "%0, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); @@ -239,11 +242,12 @@ static inline int test_and_set_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # 
test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -293,11 +297,12 @@ static inline int test_and_set_bit_lock(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -352,7 +357,7 @@ static inline int test_and_clear_bit(unsigned long nr, __asm__ __volatile__( " " __LL "%0, %1 # test_and_clear_bit \n" " " __EXT "%2, %0, %3, 1 \n" - " " __INS "%0, $0, %3, 1 \n" + " " __INS "%0, $zero, %3, 1 \n" " " __SC "%0, %1 \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "ir" (bit) @@ -365,12 +370,13 @@ static inline int test_and_clear_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_clear_bit \n" " or %2, %0, %3 \n" " xor %2, %3 \n" " " __SC "%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); @@ -422,11 +428,12 @@ static inline int test_and_change_bit(unsigned long nr, do { __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_change_bit \n" " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index da80878f2c0d..8d150ca967ae 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -17,6 +17,7 @@ extern int __isa_exception_epc(struct pt_regs *regs); extern int __compute_return_epc(struct pt_regs *regs); extern int __compute_return_epc_for_insn(struct 
pt_regs *regs, union mips_instruction insn); +extern int __nanoMIPS_compute_return_epc(struct pt_regs *regs); extern int __microMIPS_compute_return_epc(struct pt_regs *regs); extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); @@ -39,19 +40,25 @@ static inline int mm_isBranchInstr(struct pt_regs *regs, return __mm_isBranchInstr(regs, dec_insn, contpc); } +/* nanoMIPS doesn't have delay slots */ + static inline int delay_slot(struct pt_regs *regs) { + if (cpu_has_nanomips) + return 0; return regs->cp0_cause & CAUSEF_BD; } static inline void clear_delay_slot(struct pt_regs *regs) { - regs->cp0_cause &= ~CAUSEF_BD; + if (!cpu_has_nanomips) + regs->cp0_cause &= ~CAUSEF_BD; } static inline void set_delay_slot(struct pt_regs *regs) { - regs->cp0_cause |= CAUSEF_BD; + if (!cpu_has_nanomips) + regs->cp0_cause |= CAUSEF_BD; } static inline unsigned long exception_epc(struct pt_regs *regs) @@ -59,6 +66,7 @@ static inline unsigned long exception_epc(struct pt_regs *regs) if (likely(!delay_slot(regs))) return regs->cp0_epc; + WARN_ON(cpu_has_nanomips); if (get_isa16_mode(regs->cp0_epc)) return __isa_exception_epc(regs); @@ -70,6 +78,8 @@ static inline unsigned long exception_epc(struct pt_regs *regs) static inline int compute_return_epc(struct pt_regs *regs) { if (get_isa16_mode(regs->cp0_epc)) { + if (cpu_has_nanomips) + return __nanoMIPS_compute_return_epc(regs); if (cpu_has_mmips) return __microMIPS_compute_return_epc(regs); if (cpu_has_mips16) diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h index 0832f92ed718..f58d9b75d772 100644 --- a/arch/mips/include/asm/bug.h +++ b/arch/mips/include/asm/bug.h @@ -61,7 +61,7 @@ static inline void __noreturn BUG(void) static inline void __BUG_ON(unsigned long condition) { - if (__builtin_constant_p(condition)) { + if (__builtin_constant_p(condition) || IS_ENABLED(CONFIG_CPU_NANOMIPS)) { if (condition) BUG(); else diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 
89e9fb7976fe..aebed613cb49 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -44,18 +44,17 @@ extern unsigned long __xchg_called_with_bad_pointer(void) __typeof(*(m)) __ret; \ \ if (kernel_uses_llsc) { \ + __typeof(*(m)) __tmp; \ __asm__ __volatile__( \ " .set push \n" \ - " .set noat \n" \ " .set " MIPS_ISA_ARCH_LEVEL " \n" \ - "1: " ld " %0, %2 # __xchg_asm \n" \ - " .set mips0 \n" \ - " move $1, %z3 \n" \ - " .set " MIPS_ISA_ARCH_LEVEL " \n" \ - " " st " $1, %1 \n" \ - "\t" __scbeqz " $1, 1b \n" \ + "1: " ld " %0, %3 # __xchg_asm \n" \ + " move %2, %z4 \n" \ + " " st " %2, %1 \n" \ + "\t" __scbeqz " %2, 1b \n" \ " .set pop \n" \ - : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m), \ + "=&r" (__tmp) \ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \ : "memory"); \ } else { \ @@ -114,20 +113,19 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, __typeof(*(m)) __ret; \ \ if (kernel_uses_llsc) { \ + __typeof(*(m)) __tmp; \ __asm__ __volatile__( \ " .set push \n" \ - " .set noat \n" \ - " .set "MIPS_ISA_ARCH_LEVEL" \n" \ - "1: " ld " %0, %2 # __cmpxchg_asm \n" \ - " bne %0, %z3, 2f \n" \ - " .set mips0 \n" \ - " move $1, %z4 \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ - " " st " $1, %1 \n" \ - "\t" __scbeqz " $1, 1b \n" \ + "1: " ld " %0, %3 # __cmpxchg_asm \n" \ + " bne %0, %z4, 2f \n" \ + " move %2, %z5 \n" \ + " " st " %2, %1 \n" \ + "\t" __scbeqz " %2, 1b \n" \ " .set pop \n" \ "2: \n" \ - : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m), \ + "=&r" (__tmp) \ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else { \ diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index e081a265f422..70d7d4e97d6a 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -29,7 +29,12 @@ #endif /* CONFIG_CPU_MICROMIPS */ #endif /* CONFIG_CPU_MIPSR6 */ -#ifdef 
CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_NANOMIPS +#define MIPS_ISA_LEVEL "arch=32r6" +#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL +#define MIPS_ISA_LEVEL_RAW arch=32r6 +#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW +#elif defined(CONFIG_CPU_MIPSR6) #define MIPS_ISA_LEVEL "mips64r6" #define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL #define MIPS_ISA_LEVEL_RAW mips64r6 diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 37cd85ccbb4f..bd0781b04c5d 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -24,7 +24,12 @@ #define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) #endif #ifndef cpu_has_tlbinv -#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) +# ifdef CONFIG_CPU_NANOMIPS +/* TODO: remove once tlbinvf supported */ +# define cpu_has_tlbinv 0 +# else +# define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) +# endif #endif #ifndef cpu_has_segments #define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS) @@ -76,10 +81,15 @@ #endif /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. 
*/ #ifndef cpu_has_fpu -#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) -#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU) +# ifdef CONFIG_FP_SUPPORT +# define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) +# define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU) +# else +# define cpu_has_fpu 0 +# define raw_cpu_has_fpu 0 +# endif #else -#define raw_cpu_has_fpu cpu_has_fpu +# define raw_cpu_has_fpu cpu_has_fpu #endif #ifndef cpu_has_32fpr #define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR) @@ -94,7 +104,11 @@ #define cpu_has_divec (cpu_data[0].options & MIPS_CPU_DIVEC) #endif #ifndef cpu_has_vce -#define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE) +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) +# define cpu_has_vce 0 +# else +# define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE) +# endif #endif #ifndef cpu_has_cache_cdex_p #define cpu_has_cache_cdex_p (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P) @@ -293,6 +307,9 @@ #ifndef cpu_has_mips64r6 # define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) #endif +#ifndef cpu_has_nanomips32r6 +# define cpu_has_nanomips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_NANO32R6) +#endif /* * Shortcuts ... 
@@ -312,14 +329,12 @@ (cpu_has_mips_4_5 | cpu_has_mips64r1 | \ cpu_has_mips_r2 | cpu_has_mips_r6) -#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) +#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6 | cpu_has_nanomips32r6) #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) -#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) -#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ - cpu_has_mips32r6 | cpu_has_mips64r1 | \ - cpu_has_mips64r2 | cpu_has_mips64r6) +#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6 | cpu_has_nanomips32r6) +#define cpu_has_mips_r (cpu_has_mips_r1 | cpu_has_mips_r2 | cpu_has_mips_r6) /* MIPSR2 and MIPSR6 have a lot of similarities */ #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6) @@ -376,11 +391,19 @@ * DSBH and DSHD. 
*/ #ifndef cpu_has_wsbh -#define cpu_has_wsbh cpu_has_mips_r2 +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) +# define cpu_has_wsbh 1 +# else +# define cpu_has_wsbh cpu_has_mips_r2 +# endif #endif #ifndef cpu_has_dsp -#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) +# ifdef CONFIG_CPU_NANOMIPS +# define cpu_has_dsp 0 +# else +# define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP) +# endif #endif #ifndef cpu_has_dsp2 @@ -392,7 +415,7 @@ #endif #ifndef cpu_has_mipsmt -#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT) +# define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT) #endif #ifndef cpu_has_vp @@ -408,7 +431,11 @@ # define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX) # endif # ifndef cpu_has_64bits -# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) +# ifdef CONFIG_CPU_NANOMIPS +# define cpu_has_64bits 0 +# else +# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) +# endif # endif # ifndef cpu_has_64bit_zero_reg # define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT) @@ -484,7 +511,11 @@ #endif #ifndef cpu_has_vz -#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ) +# ifdef CONFIG_CPU_NANOMIPS +# define cpu_has_vz 0 +# else +# define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ) +# endif #endif #if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa) @@ -601,6 +632,14 @@ # endif #endif +#ifndef cpu_has_nanomips +# ifdef CONFIG_CPU_NANOMIPS +# define cpu_has_nanomips 1 +# else +# define cpu_has_nanomips 0 +# endif +#endif + /* * Guest capabilities */ diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index a45af3de075d..0c554884702d 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -58,7 +58,8 @@ static inline int __pure __get_cpu_type(const int cpu_type) #if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ - 
defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) + defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) || \ + defined(CONFIG_SYS_HAS_CPU_NANOMIPS32_R6) case CPU_QEMU_GENERIC: #endif @@ -82,6 +83,10 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_M6250: #endif +#ifdef CONFIG_SYS_HAS_CPU_NANOMIPS32_R6 + case CPU_I7200: +#endif + #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6 case CPU_I6400: case CPU_I6500: diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index eae3b1c7e660..cfd0bc3974fc 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -126,6 +126,7 @@ #define PRID_IMP_I6400 0xa900 #define PRID_IMP_M6250 0xab00 #define PRID_IMP_I6500 0xb000 +#define PRID_IMP_I7200 0xb200 /* * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE @@ -313,7 +314,7 @@ enum cpu_type_enum { CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350, CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC, CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, - CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250, + CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250, CPU_I7200, /* * MIPS64 class processors @@ -343,9 +344,10 @@ enum cpu_type_enum { #define MIPS_CPU_ISA_M64R2 0x00000080 #define MIPS_CPU_ISA_M32R6 0x00000100 #define MIPS_CPU_ISA_M64R6 0x00000200 +#define MIPS_CPU_ISA_NANO32R6 0x00000400 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ - MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_NANO32R6) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ MIPS_CPU_ISA_M64R6) diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h index b47a97527673..f07ff9ab837e 100644 --- a/arch/mips/include/asm/dsemul.h +++ b/arch/mips/include/asm/dsemul.h @@ -13,6 +13,7 @@ #include #include +#include /* Break instruction with special math emu break code set */ #define 
BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16)) @@ -38,8 +39,16 @@ struct task_struct; * * Return: Zero on success, negative if ir is a NOP, signal number on failure. */ +#ifdef CONFIG_FP_SUPPORT extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long branch_pc, unsigned long cont_pc); +#else +static inline int mips_dsemul(struct pt_regs *regs, mips_instruction ir, + unsigned long branch_pc, unsigned long cont_pc) +{ + return SIGILL; +} +#endif /** * do_dsemulret() - Return from a delay slot 'emulation' frame @@ -52,7 +61,14 @@ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, * * Return: True if an emulation frame was returned from, else false. */ +#ifdef CONFIG_FP_SUPPORT extern bool do_dsemulret(struct pt_regs *xcp); +#else +static inline bool do_dsemulret(struct pt_regs *xcp) +{ + return false; +} +#endif /** * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame @@ -63,7 +79,14 @@ extern bool do_dsemulret(struct pt_regs *xcp); * * Return: True if a frame was freed, else false. */ +#ifdef CONFIG_FP_SUPPORT extern bool dsemul_thread_cleanup(struct task_struct *tsk); +#else +static inline bool dsemul_thread_cleanup(struct task_struct *tsk) +{ + return false; +} +#endif /** * dsemul_thread_rollback() - Rollback from an 'emulation' frame @@ -77,7 +100,14 @@ extern bool dsemul_thread_cleanup(struct task_struct *tsk); * * Return: True if a frame was exited, else false. */ +#ifdef CONFIG_FP_SUPPORT extern bool dsemul_thread_rollback(struct pt_regs *regs); +#else +static inline bool dsemul_thread_rollback(struct pt_regs *regs) +{ + return false; +} +#endif /** * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state @@ -87,6 +117,10 @@ extern bool dsemul_thread_rollback(struct pt_regs *regs); * for delay slot 'emulation' book-keeping is freed. This is to be called * before @mm is freed in order to avoid memory leaks. 
*/ +#ifdef CONFIG_FP_SUPPORT extern void dsemul_mm_cleanup(struct mm_struct *mm); +#else +static inline void dsemul_mm_cleanup(struct mm_struct *mm) { } +#endif #endif /* __MIPS_ASM_DSEMUL_H__ */ diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 0eb1a75be105..1d00d4633d6e 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -219,7 +219,11 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs); /* * This is used to ensure we don't load something for the wrong architecture. */ -#define elf_check_arch elfo32_check_arch +# ifdef CONFIG_CPU_NANOMIPS +# define elf_check_arch elfp32_check_arch +# else +# define elf_check_arch elfo32_check_arch +# endif /* * These are used to set parameters in the core dumps. @@ -255,7 +259,11 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs); #elif defined(__MIPSEL__) #define ELF_DATA ELFDATA2LSB #endif +#ifdef CONFIG_CPU_NANOMIPS +#define ELF_ARCH EM_NANOMIPS +#else #define ELF_ARCH EM_MIPS +#endif #endif /* !defined(ELF_ARCH) */ @@ -271,7 +279,7 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs); # define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64 #endif -#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS) +#define mips_elf_check_machine(x) ((x)->e_machine == ELF_ARCH) #define vmcore_elf32_check_arch mips_elf_check_machine #define vmcore_elf64_check_arch mips_elf_check_machine @@ -334,6 +342,22 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs); __res; \ }) +/* + * Return non-zero if HDR identifies a p32 ELF binary. 
+ */ +#define elfp32_check_arch(hdr) \ +({ \ + int __res = 1; \ + struct elfhdr *__h = (hdr); \ + \ + if (!mips_elf_check_machine(__h)) \ + __res = 0; \ + if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ + __res = 0; \ + \ + __res; \ +}) + struct mips_abi; extern struct mips_abi mips_abi; diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h index f184ba088532..e406ab8f612f 100644 --- a/arch/mips/include/asm/fpregdef.h +++ b/arch/mips/include/asm/fpregdef.h @@ -28,7 +28,8 @@ #define SET_HARDFLOAT #endif -#if _MIPS_SIM == _MIPS_SIM_ABI32 +/* FIXME this may need changing when we get proper P32 */ +#if _MIPS_SIM == _MIPS_SIM_ABI32 || _MIPS_SIM == _MIPS_SIM_PABI32 /* * These definitions only cover the R3000-ish 16/32 register model. @@ -67,9 +68,7 @@ #define fs5 $f30 #define fs5f $f31 -#define fcr31 $31 /* FPU status register */ - -#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 || _MIPS_SIM == _MIPS_SIM_PABI32 */ #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 @@ -106,8 +105,8 @@ #define fs6 $f30 #define fs7 $f31 -#define fcr31 $31 - #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ +#define fcr31 $31 /* FPU status register */ + #endif /* _ASM_FPREGDEF_H */ diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index a2813fe381cf..a7b2a6b76c14 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -61,6 +61,9 @@ static inline int __enable_fpu(enum fpu_mode mode) { int fr; + if (!IS_ENABLED(CONFIG_FP_SUPPORT)) + return SIGFPE; + switch (mode) { case FPU_AS_IS: /* just enable the FPU in its current mode */ diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index b36097d3cbf4..62d1f5e767ac 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -176,9 +176,25 @@ do { \ #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) #endif /* CONFIG_DEBUG_FS */ +#ifdef 
CONFIG_FP_SUPPORT + extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, void __user **fault_addr); + +#else /* !CONFIG_FP_SUPPORT */ + +static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp, + struct mips_fpu_struct *ctx, + int has_fpu, + void __user **fault_addr) +{ + *fault_addr = NULL; + return SIGILL; +} + +#endif /* !CONFIG_FP_SUPPORT */ + void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, struct task_struct *tsk); int process_fpemu_return(int sig, void __user *fault_addr, diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index a9e61ea54ca9..50e6af914fe7 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -29,8 +29,8 @@ " .set mips0 \n" \ " " insn " \n" \ " .set arch=r4000 \n" \ - "2: sc $1, %2 \n" \ - " beqzl $1, 1b \n" \ + "2: sc $at, %2 \n" \ + " beqzl $at, 1b \n" \ __WEAK_LLSC_MB \ "3: \n" \ " .insn \n" \ @@ -53,18 +53,20 @@ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ + " .set push \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ - " .set mips0 \n" \ + " .set pop \n" \ " " insn " \n" \ + " .set push \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \ - "2: "user_sc("$1", "%2")" \n" \ - " beqz $1, 1b \n" \ + "2: "user_sc("$at", "%2")" \n" \ + " .set pop \n" \ + " beqz $at, 1b \n" \ + " .set pop \n" \ __WEAK_LLSC_MB \ "3: \n" \ " .insn \n" \ - " .set pop \n" \ - " .set mips0 \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %6 \n" \ " j 3b \n" \ @@ -91,23 +93,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg); + __futex_atomic_op("move $at, %z5", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op("addu $1, %1, %z5", + __futex_atomic_op("addu $at, %1, %z5", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: - __futex_atomic_op("or $1, %1, 
%z5", + __futex_atomic_op("or $at, %1, %z5", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and $1, %1, %z5", + __futex_atomic_op("and $at, %1, %z5", ret, oldval, uaddr, ~oparg); break; case FUTEX_OP_XOR: - __futex_atomic_op("xor $1, %1, %z5", + __futex_atomic_op("xor $at, %1, %z5", ret, oldval, uaddr, oparg); break; default: @@ -141,10 +143,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "1: ll %1, %3 \n" " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z5 \n" + " move $at, %z5 \n" " .set arch=r4000 \n" - "2: sc $1, %2 \n" - " beqzl $1, 1b \n" + "2: sc $at, %2 \n" + " beqzl $at, 1b \n" __WEAK_LLSC_MB "3: \n" " .insn \n" @@ -166,14 +168,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" " .set noat \n" + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1: "user_ll("%1", "%3")" \n" + " .set pop \n" " bne %1, %z4, 3f \n" - " .set mips0 \n" - " move $1, %z5 \n" + " move $at, %z5 \n" + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" - "2: "user_sc("$1", "%2")" \n" - " beqz $1, 1b \n" + "2: "user_sc("$at", "%2")" \n" + " .set pop \n" + " beqz $at, 1b \n" __WEAK_LLSC_MB "3: \n" " .insn \n" diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index e0fecf206f2c..9eb33c99d463 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -16,8 +16,13 @@ #define ___ssnop \ sll $0, $0, 1 +#ifdef CONFIG_CPU_NANOMIPS +#define ___ehb \ + ehb +#else #define ___ehb \ sll $0, $0, 3 +#endif /* * TLB hazards @@ -53,6 +58,15 @@ #define __back_to_back_c0_hazard \ ___ehb +#ifdef CC_HAVE_ASM_GOTO +#define instruction_hazard() do { \ + void *tgt = &&l_done; \ + asm_volatile_goto("jr.hb\t%0" :: "r"(tgt) :: l_done); \ + unreachable(); \ +l_done: \ + (void)0; \ +} while (0) +#else /* * gcc has a tradition of misscompiling the previous construct using the * address of a label as argument to inline assembler. 
Gas otoh has the @@ -66,13 +80,15 @@ do { \ unsigned long tmp; \ \ __asm__ __volatile__( \ + " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ " dla %0, 1f \n" \ " jr.hb %0 \n" \ - " .set mips0 \n" \ + " .set pop \n" \ "1: \n" \ : "=r" (tmp)); \ } while (0) +#endif #elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \ defined(CONFIG_CPU_BMIPS) diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0cbf3af37eca..88f7e0e7a5c0 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -333,13 +333,14 @@ static inline void pfx##write##bwlq(type val, \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ - ".set arch=r4000" "\t\t# __writeq""\n\t" \ + ".set push" "\t\t# __writeq""\n\t" \ + ".set arch=r4000" "\n\t" \ "dsll32 %L0, %L0, 0" "\n\t" \ "dsrl32 %L0, %L0, 0" "\n\t" \ "dsll32 %M0, %M0, 0" "\n\t" \ "or %L0, %L0, %M0" "\n\t" \ "sd %L0, %2" "\n\t" \ - ".set mips0" "\n" \ + ".set pop" "\n" \ : "=r" (__tmp) \ : "0" (__val), "m" (*__mem)); \ if (irq) \ @@ -363,11 +364,12 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ if (irq) \ local_irq_save(__flags); \ __asm__ __volatile__( \ - ".set arch=r4000" "\t\t# __readq" "\n\t" \ + ".set push" "\t\t# __readq" "\n\t" \ + ".set arch=r4000" "\n\t" \ "ld %L0, %1" "\n\t" \ "dsra32 %M0, %L0, 0" "\n\t" \ "sll %L0, %L0, 0" "\n\t" \ - ".set mips0" "\n" \ + ".set pop" "\n" \ : "=r" (__val) \ : "m" (*__mem)); \ if (irq) \ diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 9d3610be2323..87404720a7e3 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -63,15 +63,14 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " .set push \n" - " .set noreorder \n" " .set noat \n" #if defined(CONFIG_IRQ_MIPS_CPU) /* * Slow, but doesn't suffer from a relatively unlikely race * condition we're having since days 1. 
*/ - " beqz %[flags], 1f \n" " di \n" + " beqz %[flags], 1f \n" " ei \n" "1: \n" #else diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index ac8264eca1e9..401091b3ba19 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -49,13 +49,14 @@ static __inline__ long local_add_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_add_return \n" " addu %0, %1, %3 \n" __SC "%0, %2 \n" " beqz %0, 1b \n" " addu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); @@ -94,13 +95,14 @@ static __inline__ long local_sub_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( + " .set push \n" " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_sub_return \n" " subu %0, %1, %3 \n" __SC "%0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " .set mips0 \n" + " .set pop \n" : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "Ir" (i), "m" (l->a.counter) : "memory"); diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 2c6bb08fdb39..e4bc1e770f4a 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -176,6 +176,7 @@ GCR_ACCESSOR_RO(32, 0x030, rev) #define CM_REV_CM2 CM_ENCODE_REV(6, 0) #define CM_REV_CM2_5 CM_ENCODE_REV(7, 0) +#define CM_REV_CM2_6 CM_ENCODE_REV(6, 0x10) #define CM_REV_CM3 CM_ENCODE_REV(8, 0) #define CM_REV_CM3_5 CM_ENCODE_REV(9, 0) @@ -423,6 +424,9 @@ static inline unsigned int mips_cm_max_vp_width(void) if (mips_cm_revision() >= CM_REV_CM3) return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW; + if (mips_cm_revision() == CM_REV_CM2_6) + return 4; + if (mips_cm_present()) { /* * We presume that all cores in the system will have the same diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h index 
b444523ecd50..7fc762296e7b 100644 --- a/arch/mips/include/asm/mips_mt.h +++ b/arch/mips/include/asm/mips_mt.h @@ -25,6 +25,12 @@ extern void mips_mt_set_cpuoptions(void); static inline void mips_mt_set_cpuoptions(void) { } #endif +#ifdef CONFIG_MIPS_MT_RAND_SCHED_POLICY +extern void mips_mt_randomize_sched_policy(void); +#else +static inline void mips_mt_randomize_sched_policy(void) { } +#endif + struct class; extern struct class *mt_class; diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h index 4acfc78bc504..2a68be3f9e7a 100644 --- a/arch/mips/include/asm/mipsmtregs.h +++ b/arch/mips/include/asm/mipsmtregs.h @@ -42,6 +42,8 @@ #define read_c0_tccontext() __read_32bit_c0_register($2, 5) #define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val) +#define write_c0_tcschedule(val) __write_32bit_c0_register($2, 6, val) + #else /* Assembly */ /* * Macros for use in assembly language code @@ -177,6 +179,9 @@ /* TCHalt */ #define TCHALT_H (_ULCAST_(1)) +/* TCSchedule (MIPS I7200) */ +#define I7200_TCSCHEDULE_PRIO_EN (_ULCAST_(1) << 1) + #ifndef __ASSEMBLY__ static inline unsigned core_nvpes(void) @@ -190,6 +195,58 @@ static inline unsigned core_nvpes(void) return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; } +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) + +static inline unsigned int dvpe(void) +{ + int res; + + asm volatile( + ".set push\n\t" + ".set mt\n\t" + "dvpe\t%0\n\t" + ".set pop" + : "=r"(res)); + instruction_hazard(); + return res; +} + +static inline void __raw_evpe(void) +{ + asm volatile( + ".set push\n\t" + ".set mt\n\t" + "evpe\n\t" + "ehb\n\t" + ".set pop"); +} + +static inline unsigned int dmt(void) +{ + int res; + + asm volatile( + ".set push\n\t" + ".set mt\n\t" + "dmt\t%0\n\t" + ".set pop" + : "=r"(res)); + instruction_hazard(); + return res; +} + +static inline void __raw_emt(void) +{ + asm volatile( + ".set push\n\t" + ".set mt\n\t" + "emt\n\t" + "ehb\n\t" + ".set pop"); +} + +#else + 
static inline unsigned int dvpe(void) { int res = 0; @@ -222,17 +279,6 @@ static inline void __raw_evpe(void) " .set pop \n"); } -/* Enable virtual processor execution if previous suggested it should be. - EVPE_ENABLE to force */ - -#define EVPE_ENABLE MVPCONTROL_EVP - -static inline void evpe(int previous) -{ - if ((previous & MVPCONTROL_EVP)) - __raw_evpe(); -} - static inline unsigned int dmt(void) { int res; @@ -263,6 +309,19 @@ static inline void __raw_emt(void) " .set reorder"); } +#endif + +/* Enable virtual processor execution if previous suggested it should be. + EVPE_ENABLE to force */ + +#define EVPE_ENABLE MVPCONTROL_EVP + +static inline void evpe(int previous) +{ + if ((previous & MVPCONTROL_EVP)) + __raw_evpe(); +} + /* enable multi-threaded execution if previous suggested it should be. EMT_ENABLE to force */ @@ -274,33 +333,75 @@ static inline void emt(int previous) __raw_emt(); } +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) + #define mftc0(rt,sel) \ ({ \ unsigned long __res; \ \ __asm__ __volatile__( \ " .set push \n" \ - " .set mips32r2 \n" \ - " .set noat \n" \ - " # mftc0 $1, $" #rt ", " #sel " \n" \ - " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \ - " move %0, $1 \n" \ + " .set mt \n" \ + " mftc0 %0, $" #rt ", " #sel " \n" \ " .set pop \n" \ : "=r" (__res)); \ \ __res; \ }) +#define mttc0(rd, sel, v) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set mt \n" \ + " mttc0 %0, $" #rd ", " #sel " \n" \ + " .set pop \n" \ + : \ + : "r" (v)); \ +}) + +#ifdef __nanomips__ +# define _r_pfx "r" +#else +# define _r_pfx +#endif + #define mftgpr(rt) \ ({ \ unsigned long __res; \ \ __asm__ __volatile__( \ " .set push \n" \ - " .set noat \n" \ + " .set mt \n" \ + " mftgpr %0, $" _r_pfx #rt " \n" \ + " .set pop \n" \ + : "=r" (__res)); \ + \ + __res; \ +}) + +#define mttgpr(rd,v) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set mt \n" \ + " mttgpr %0, $" _r_pfx #rd " \n" \ + " .set pop \n" \ + : : "r" (v)); \ +} while 
(0) + +#else + +#define mftc0(rt,sel) \ +({ \ + unsigned long __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ " .set mips32r2 \n" \ - " # mftgpr $1," #rt " \n" \ - " .word 0x41000820 | (" #rt " << 16) \n" \ + " .set noat \n" \ + " # mftc0 $1, $" #rt ", " #sel " \n" \ + " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \ " move %0, $1 \n" \ " .set pop \n" \ : "=r" (__res)); \ @@ -308,12 +409,18 @@ static inline void emt(int previous) __res; \ }) -#define mftr(rt, u, sel) \ +#define mftgpr(rt) \ ({ \ unsigned long __res; \ \ __asm__ __volatile__( \ - " mftr %0, " #rt ", " #u ", " #sel " \n" \ + " .set push \n" \ + " .set noat \n" \ + " .set mips32r2 \n" \ + " # mftgpr $1," #rt " \n" \ + " .word 0x41000820 | (" #rt " << 16) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ : "=r" (__res)); \ \ __res; \ @@ -346,6 +453,18 @@ do { \ : "r" (v)); \ }) +#endif + +#define mftr(rt, u, sel) \ +({ \ + unsigned long __res; \ + \ + __asm__ __volatile__( \ + " mftr %0, " #rt ", " #u ", " #sel " \n" \ + : "=r" (__res)); \ + \ + __res; \ +}) #define mttr(rd, u, sel, v) \ ({ \ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5a18d22cf76b..d1ba44b06318 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -668,6 +669,7 @@ #define MIPS_CONF5_GI_IC (_ULCAST_(2) << 15) #define MIPS_CONF5_GI_IC_TLB (_ULCAST_(3) << 15) #define MIPS_CONF5_MI (_ULCAST_(1) << 17) +#define MIPS_CONF5_ULS (_ULCAST_(1) << 20) #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) #define MIPS_CONF5_EVA (_ULCAST_(1) << 28) #define MIPS_CONF5_CV (_ULCAST_(1) << 29) @@ -704,8 +706,22 @@ #define MIPS_WATCHHI_I (_ULCAST_(1) << 2) #define MIPS_WATCHHI_R (_ULCAST_(1) << 1) #define MIPS_WATCHHI_W (_ULCAST_(1) << 0) +#define MIPS_WATCHHI_RW (_ULCAST_(0x3) << 0) #define MIPS_WATCHHI_IRW (_ULCAST_(0x7) << 0) +/* Special I7200 WATCHHI fields */ + +#define MIPS_WATCHHI_U (_ULCAST_(1) << 16) 
+#define MIPS_WATCHHI_MTEN_ID_S 11 +/* All TCs of VPE X */ +#define MIPS_WATCHHI_MTEN_ID_VPE (_ULCAST_(0x10) << MIPS_WATCHHI_MTEN_ID_S) +#define MIPS_WATCHHI_WHEXT_SEL (_ULCAST_(3) << 8) +#define MIPS_WATCHHI_I_RSLT (_ULCAST_(1) << 5) +#define MIPS_WATCHHI_R_RSLT (_ULCAST_(1) << 4) +#define MIPS_WATCHHI_W_RSLT (_ULCAST_(1) << 3) +#define MIPS_WATCHHI_IRW_RSLT (_ULCAST_(0x7) << 3) + + /* PerfCnt control register definitions */ #define MIPS_PERFCTRL_EXL (_ULCAST_(1) << 0) #define MIPS_PERFCTRL_K (_ULCAST_(1) << 1) @@ -1133,9 +1149,13 @@ #ifndef __ASSEMBLY__ /* - * Macros for handling the ISA mode bit for MIPS16 and microMIPS. + * Macros for handling the ISA mode bit for MIPS16, microMIPS, and nanoMIPS. */ -#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \ +#if defined(CONFIG_CPU_NANOMIPS) +#define get_isa16_mode(x) 1 +#define msk_isa16_mode(x) (x) +#define set_isa16_mode(x) do { } while(0) +#elif defined(CONFIG_SYS_SUPPORTS_MIPS16) || \ defined(CONFIG_SYS_SUPPORTS_MICROMIPS) #define get_isa16_mode(x) ((x) & 0x1) #define msk_isa16_mode(x) ((x) & ~0x1) @@ -1146,6 +1166,26 @@ #define set_isa16_mode(x) do { } while(0) #endif +/* + * nanoMIPS instructions can be 16-bit, 32-bit or 48-bit in length. This returns + * the number of bytes a given instruction is based on the first 16-bit word. + */ +static inline unsigned int nanomips_insn_len(u16 insn) +{ + u16 opcode = insn >> 10; + + /* 16-bit instructions */ + if (insn & (1 << 12)) + return 2; + + /* 48-bit instructions */ + if (opcode == 0x18) + return 6; + + /* 32-bit instructions */ + return 4; +} + /* * microMIPS instructions can be 16-bit or 32-bit in length. This * returns a 1 if the instruction is 16-bit and a 0 if 32-bit. 
@@ -1168,6 +1208,16 @@ static inline int mm_insn_16bit(u16 insn) ".insn\n\t" \ ".hword ((" #_enc ") >> 16)\n\t" \ ".hword ((" #_enc ") & 0xffff)\n\t" +#elif defined(CONFIG_CPU_NANOMIPS) +#define _ASM_INSN16_IF_NM(_enc) \ + ".insn\n\t" \ + ".hword (" #_enc ")\n\t" +#define _ASM_INSN32_IF_NM(_enc) \ + ".insn\n\t" \ + ".hword ((" #_enc ") >> 16)\n\t" \ + ".hword ((" #_enc ") & 0xffff)\n\t" +#define _ASM_SIGRIE_IF_NM() \ + "sigrie\t0\n\t" #else #define _ASM_INSN_IF_MIPS(_enc) \ ".insn\n\t" \ @@ -1180,6 +1230,15 @@ static inline int mm_insn_16bit(u16 insn) #ifndef _ASM_INSN32_IF_MM #define _ASM_INSN32_IF_MM(_enc) #endif +#ifndef _ASM_INSN16_IF_NM +#define _ASM_INSN16_IF_NM(_enc) +#endif +#ifndef _ASM_INSN32_IF_NM +#define _ASM_INSN32_IF_NM(_enc) +#endif +#ifndef _ASM_SIGRIE_IF_NM +#define _ASM_SIGRIE_IF_NM() +#endif #ifndef _ASM_INSN_IF_MIPS #define _ASM_INSN_IF_MIPS(_enc) #endif @@ -1272,12 +1331,18 @@ __asm__(".macro parse_r var r\n\t" */ static inline void tlbinvf(void) { +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) + asm volatile("tlbinvf"); + return; +#endif + __asm__ __volatile__( ".set push\n\t" ".set noreorder\n\t" "# tlbinvf\n\t" _ASM_INSN_IF_MIPS(0x42000004) _ASM_INSN32_IF_MM(0x0000537c) + _ASM_SIGRIE_IF_NM() ".set pop"); } @@ -1340,9 +1405,10 @@ do { \ : "=r" (__res)); \ else \ __asm__ vol( \ - ".set\tmips32\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "mfc0\t%0, " #source ", " #sel "\n\t" \ - ".set\tmips0\n\t" \ + ".set\tpop\n\t" \ : "=r" (__res)); \ __res; \ }) @@ -1352,16 +1418,18 @@ do { \ if (sizeof(unsigned long) == 4) \ __res = __read_64bit_c0_split(source, sel, vol); \ else if (sel == 0) \ - __asm__ vol ( \ - ".set\tmips3\n\t" \ + __asm__ vol ( \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmfc0\t%0, " #source "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__res)); \ else \ - __asm__ vol ( \ - ".set\tmips64\n\t" \ + __asm__ vol( \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmfc0\t%0, " #source 
", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__res)); \ __res; \ }) @@ -1386,9 +1454,10 @@ do { \ : : "Jr" ((unsigned int)(value))); \ else \ __asm__ __volatile__( \ - ".set\tmips32\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "mtc0\t%z0, " #register ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" ((unsigned int)(value))); \ } while (0) @@ -1398,15 +1467,17 @@ do { \ __write_64bit_c0_split(register, sel, value); \ else if (sel == 0) \ __asm__ __volatile__( \ - ".set\tmips3\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmtc0\t%z0, " #register "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" (value)); \ else \ __asm__ __volatile__( \ - ".set\tmips64\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmtc0\t%z0, " #register ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : : "Jr" (value)); \ } while (0) @@ -1456,21 +1527,25 @@ do { \ unsigned long __flags; \ \ local_irq_save(__flags); \ - if (sel == 0) \ + if (WARN_ON(IS_ENABLED(CONFIG_CPU_NANOMIPS))) { \ + __val = 0; \ + } else if (sel == 0) \ __asm__ vol( \ - ".set\tmips64\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmfc0\t%L0, " #source "\n\t" \ "dsra\t%M0, %L0, 32\n\t" \ "sll\t%L0, %L0, 0\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__val)); \ else \ __asm__ vol( \ - ".set\tmips64\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dmfc0\t%L0, " #source ", " #sel "\n\t" \ "dsra\t%M0, %L0, 32\n\t" \ "sll\t%L0, %L0, 0\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=r" (__val)); \ local_irq_restore(__flags); \ \ @@ -1483,31 +1558,39 @@ do { \ unsigned long __flags; \ \ local_irq_save(__flags); \ - if (sel == 0) \ + if (WARN_ON(IS_ENABLED(CONFIG_CPU_NANOMIPS))) { \ + (void)(val); \ + } else if (sel == 0) \ __asm__ __volatile__( \ - ".set\tmips64\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, 
%L0, %M0\n\t" \ "dmtc0\t%L0, " #source "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=&r,r" (__tmp) \ : "r,0" (val)); \ else \ __asm__ __volatile__( \ - ".set\tmips64\n\t" \ + ".set\tpush\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source ", " #sel "\n\t" \ - ".set\tmips0" \ + ".set\tpop" \ : "=&r,r" (__tmp) \ : "r,0" (val)); \ local_irq_restore(__flags); \ } while (0) +#if defined(CONFIG_CPU_NANOMIPS) +/* mfhc0 / mthc0 are base instructions in nanoMIPS */ +#define _ASM_SET_XPA "" +#else #ifndef TOOLCHAIN_SUPPORTS_XPA _ASM_MACRO_2R_1S(mfhc0, rt, rs, sel, _ASM_INSN_IF_MIPS(0x40400000 | __rt << 16 | __rs << 11 | \\sel) @@ -1518,7 +1601,8 @@ _ASM_MACRO_2R_1S(mthc0, rt, rd, sel, #define _ASM_SET_XPA "" #else /* !TOOLCHAIN_SUPPORTS_XPA */ #define _ASM_SET_XPA ".set\txpa\n\t" -#endif +#endif /* !TOOLCHAIN_SUPPORTS_XPA */ +#endif /* CONFIG_CPU_NANOMIPS */ #define __readx_32bit_c0_register(source, sel) \ ({ \ @@ -1526,7 +1610,7 @@ _ASM_MACRO_2R_1S(mthc0, rt, rd, sel, \ __asm__ __volatile__( \ " .set push \n" \ - " .set mips32r2 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ _ASM_SET_XPA \ " mfhc0 %0, " #source ", %1 \n" \ " .set pop \n" \ @@ -1539,7 +1623,7 @@ _ASM_MACRO_2R_1S(mthc0, rt, rd, sel, do { \ __asm__ __volatile__( \ " .set push \n" \ - " .set mips32r2 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ _ASM_SET_XPA \ " mthc0 %z0, " #register ", %1 \n" \ " .set pop \n" \ @@ -1980,7 +2064,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c) ({ int __res; \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips32r2\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ _ASM_SET_VIRT \ "mfgc0\t%0, " #source ", %1\n\t" \ ".set\tpop" \ @@ -1997,7 +2081,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c) local_irq_save(__flags); \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips64r2\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ _ASM_SET_VIRT \ "dmfgc0\t%L0, " 
#source ", %1\n\t" \ "dsra\t%M0, %L0, 32\n\t" \ @@ -2030,7 +2114,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c) do { \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips32r2\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ _ASM_SET_VIRT \ "mtgc0\t%z0, " #register ", %1\n\t" \ ".set\tpop" \ @@ -2047,7 +2131,7 @@ do { \ local_irq_save(__flags); \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips64r2\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ _ASM_SET_VIRT \ "dins\t%0, %1, 32, 32\n\t" \ "dmtgc0\t%0, " #register ", %2\n\t" \ @@ -2065,7 +2149,7 @@ do { \ else \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips64r2\n\t" \ + ".set\t" MIPS_ISA_LEVEL "\n\t" \ _ASM_SET_VIRT \ "dmtgc0\t%z0, " #register ", %1\n\t" \ ".set\tpop" \ @@ -2303,7 +2387,7 @@ do { \ " .set reorder \n" \ " # gas fails to assemble cfc1 for some archs, \n" \ " # like Octeon. \n" \ - " .set mips1 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " "STR(gas_hardfloat)" \n" \ " cfc1 %0,"STR(source)" \n" \ " .set pop \n" \ @@ -2554,10 +2638,11 @@ do { \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " # rddsp $1, %x1 \n" \ + " # rddsp $at, %x1 \n" \ _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \ _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \ - " move %0, $1 \n" \ + _ASM_SIGRIE_IF_NM() \ + " move %0, $at \n" \ " .set pop \n" \ : "=r" (__res) \ : "i" (mask)); \ @@ -2569,10 +2654,11 @@ do { \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " move $1, %0 \n" \ - " # wrdsp $1, %x1 \n" \ + " move $at, %0 \n" \ + " # wrdsp $at, %x1 \n" \ _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \ _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \ + _ASM_SIGRIE_IF_NM() \ " .set pop \n" \ : \ : "r" (val), "i" (mask)); \ @@ -2587,7 +2673,8 @@ do { \ " .set noat \n" \ _ASM_INSN_IF_MIPS(0x00000810 | %X1) \ _ASM_INSN32_IF_MM(0x0001007c | %x1) \ - " move %0, $1 \n" \ + _ASM_SIGRIE_IF_NM() \ + " move %0, $at \n" \ " .set pop \n" \ : "=r" (__treg) \ : "i" (ins)); \ @@ -2599,9 +2686,10 @@ do { \ __asm__ 
__volatile__( \ " .set push \n" \ " .set noat \n" \ - " move $1, %0 \n" \ + " move $at, %0 \n" \ _ASM_INSN_IF_MIPS(0x00200011 | %X1) \ _ASM_INSN32_IF_MM(0x0001207c | %x1) \ + _ASM_SIGRIE_IF_NM() \ " .set pop \n" \ : \ : "r" (val), "i" (ins)); \ @@ -2670,8 +2758,8 @@ static inline void tlb_read(void) " .set noreorder \n" " .set noat \n" " .set mips32r2 \n" - " .word 0x41610001 # dvpe $1 \n" - " move %0, $1 \n" + " .word 0x41610001 # dvpe $at \n" + " move %0, $at \n" " ehb \n" " .set pop \n" : "=r" (res)); diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 6dc0b21b8acd..40fb3566f8f2 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -97,6 +97,8 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "MIPS64_R2 " #elif defined CONFIG_CPU_MIPS64_R6 #define MODULE_PROC_FAMILY "MIPS64_R6 " +#elif defined CONFIG_CPU_NANOMIPS32_R6 +#define MODULE_PROC_FAMILY "nanoMIPS32_R6 " #elif defined CONFIG_CPU_R3000 #define MODULE_PROC_FAMILY "R3000 " #elif defined CONFIG_CPU_TX39XX diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index ad461216b5a1..06539c5278dd 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -72,9 +72,6 @@ static inline unsigned int page_size_ftlb(unsigned int mmuextdef) #include -extern void build_clear_page(void); -extern void build_copy_page(void); - /* * It's normally defined only for FLATMEM config but it's * used in our early mem init code for all memory models. 
@@ -82,8 +79,20 @@ extern void build_copy_page(void); */ #define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET) +#ifdef CONFIG_CPU_NANOMIPS +#include + +#define build_clear_page() do {} while (0) +#define build_copy_page() do {} while (0) +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) +#else +extern void build_clear_page(void); +extern void build_copy_page(void); + extern void clear_page(void * page); extern void copy_page(void * to, void * from); +#endif extern unsigned long shm_align_mask; diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 1a508a74d48d..612ee4be752e 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -210,7 +210,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * this atomically. */ unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; + unsigned long tmp, tmp2; if (kernel_uses_llsc && R10000_LLSC_WAR) { __asm__ __volatile__ ( @@ -230,19 +230,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) : [global] "r" (page_global)); } else if (kernel_uses_llsc) { __asm__ __volatile__ ( - " .set "MIPS_ISA_ARCH_LEVEL" \n" " .set push \n" - " .set noreorder \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%[tmp], %[buddy] \n" + " or %[tmp2], %[tmp], %[global] \n" " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" + __SC "%[tmp2], %[buddy] \n" + " beqz %[tmp2], 1b \n" " nop \n" "2: \n" " .set pop \n" - " .set mips0 \n" - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) + : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2) : [global] "r" (page_global)); } #else /* !CONFIG_SMP */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index b6578611dddb..fbb27f038e3a 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -26,7 +26,7 @@ * arch/mips/kernel/ptrace.c. 
*/ struct pt_regs { -#ifdef CONFIG_32BIT +#if _MIPS_SIM == _MIPS_SIM_ABI32 /* Pad bytes for argument save space on the stack. */ unsigned long pad0[8]; #endif @@ -147,15 +147,23 @@ extern int ptrace_set_watch_regs(struct task_struct *child, static inline int is_syscall_success(struct pt_regs *regs) { +#ifdef CONFIG_CPU_NANOMIPS + return (unsigned long)regs->regs[4] >= (unsigned long)-4095; +#else return !regs->regs[7]; +#endif } static inline long regs_return_value(struct pt_regs *regs) { +#ifdef CONFIG_CPU_NANOMIPS + return regs->regs[4]; +#else if (is_syscall_success(regs) || !user_mode(regs)) return regs->regs[2]; else return -regs->regs[2]; +#endif } #define instruction_pointer(regs) ((regs)->cp0_epc) diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index af869b3b41c6..7fccb14248e2 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -197,7 +197,7 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set mips3 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \ " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \ " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \ @@ -223,7 +223,7 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set mips3 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \ " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \ " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \ @@ -249,7 +249,7 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set mips3 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \ " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \ " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \ @@ -275,7 +275,7 @@ static inline void 
invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set mips3 \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \ " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \ " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \ @@ -307,7 +307,7 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push\n" \ " .set noreorder\n" \ - " .set mips64r6\n" \ + " .set " MIPS_ISA_LEVEL "\n" \ " .set noat\n" \ " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ @@ -317,15 +317,15 @@ static inline void invalidate_tcache_page(unsigned long addr) " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ - " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ - " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ - " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ - " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ - " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ - " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ - " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ + " "__stringify(LONG_ADDIU)" $at, %0, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x010($at)\n" \ + " cache %1, 0x020($at); cache %1, 0x030($at)\n" \ + " cache %1, 0x040($at); cache %1, 0x050($at)\n" \ + " cache %1, 0x060($at); cache %1, 0x070($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x090($at)\n" \ + " cache %1, 0x0a0($at); cache %1, 0x0b0($at)\n" \ + " cache %1, 0x0c0($at); cache %1, 0x0d0($at)\n" \ + " cache %1, 0x0e0($at); cache %1, 0x0f0($at)\n" \ " .set pop\n" \ : \ : "r" (base), \ @@ -335,27 +335,27 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push\n" \ " .set noreorder\n" \ - " .set mips64r6\n" \ + " .set " MIPS_ISA_LEVEL 
"\n" \ " .set noat\n" \ " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ - " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ - " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ - " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \ - " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ - " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ - " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " "__stringify(LONG_ADDIU)" $at, %0, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x020($at)\n" \ + " cache %1, 0x040($at); cache %1, 0x060($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0a0($at)\n" \ + " cache %1, 0x0c0($at); cache %1, 0x0e0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x020($at)\n" \ + " cache %1, 0x040($at); cache %1, 0x060($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0a0($at)\n" \ + " cache %1, 0x0c0($at); cache %1, 0x0e0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x020($at)\n" \ + " cache %1, 0x040($at); cache %1, 0x060($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0a0($at)\n" \ + " cache %1, 0x0c0($at); cache %1, 0x0e0($at)\n" \ " .set pop\n" \ : \ : "r" (base), \ @@ -365,31 +365,31 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push\n" \ " .set noreorder\n" \ - " .set mips64r6\n" \ + " .set " MIPS_ISA_LEVEL "\n" \ " .set noat\n" \ " 
cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ - " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ - " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " "__stringify(LONG_ADDIU)" $at, %0, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); 
cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x040($at)\n" \ + " cache %1, 0x080($at); cache %1, 0x0c0($at)\n" \ " .set pop\n" \ : \ : "r" (base), \ @@ -399,41 +399,41 @@ static inline void invalidate_tcache_page(unsigned long addr) __asm__ __volatile__( \ " .set push\n" \ " .set noreorder\n" \ - " .set mips64r6\n" \ + " .set " MIPS_ISA_LEVEL "\n" \ " .set noat\n" \ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ - " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache 
%1, 0x000($1); cache %1, 0x080($1)\n" \ - " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ - " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ + " "__stringify(LONG_ADDIU)" $at, %0, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ + " "__stringify(LONG_ADDIU)" $at, $at, 0x100\n" \ + " cache %1, 0x000($at); cache %1, 0x080($at)\n" \ " .set pop\n" \ : \ : "r" (base), \ diff --git a/arch/mips/include/asm/regdef.h b/arch/mips/include/asm/regdef.h index 3c687df1d515..ca41a003a9b0 100644 --- a/arch/mips/include/asm/regdef.h +++ 
b/arch/mips/include/asm/regdef.h @@ -14,19 +14,38 @@ #include -#if _MIPS_SIM == _MIPS_SIM_ABI32 - /* - * Symbolic register names for 32 bit ABI + * Register usage common to all MIPS ABIs */ #define zero $0 /* wired zero */ -#define AT $1 /* assembler temp - uppercase because of ".set at" */ -#define v0 $2 /* return value */ -#define v1 $3 +#define AT $at /* assembler temp - uppercase because of ".set at" */ #define a0 $4 /* argument registers */ #define a1 $5 #define a2 $6 #define a3 $7 +#define s0 $16 /* callee saved */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define k0 $26 /* kernel scratch */ +#define k1 $27 +#define gp $28 /* global pointer - caller saved for PIC */ +#define sp $29 /* stack pointer */ +#define fp $30 /* frame pointer */ +#define s8 $30 /* same like fp! */ +#define ra $31 /* return address */ + +#if _MIPS_SIM == _MIPS_SIM_ABI32 + +/* + * Register usage specific to the o32 ABI + */ +#define v0 $2 /* return value */ +#define v1 $3 #define t0 $8 /* caller saved */ #define t1 $9 #define t2 $10 @@ -39,37 +58,19 @@ #define ta2 $14 #define t7 $15 #define ta3 $15 -#define s0 $16 /* callee saved */ -#define s1 $17 -#define s2 $18 -#define s3 $19 -#define s4 $20 -#define s5 $21 -#define s6 $22 -#define s7 $23 #define t8 $24 /* caller saved */ #define t9 $25 #define jp $25 /* PIC jump register */ -#define k0 $26 /* kernel scratch */ -#define k1 $27 -#define gp $28 /* global pointer */ -#define sp $29 /* stack pointer */ -#define fp $30 /* frame pointer */ -#define s8 $30 /* same like fp! 
*/ -#define ra $31 /* return address */ #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 -#define zero $0 /* wired zero */ -#define AT $at /* assembler temp - uppercase because of ".set at" */ +/* + * Register usage specific to the n32 & n64 ABIs + */ #define v0 $2 /* return value - caller saved */ #define v1 $3 -#define a0 $4 /* argument registers */ -#define a1 $5 -#define a2 $6 -#define a3 $7 #define a4 $8 /* arg reg 64 bit; caller saved in 32 bit */ #define ta0 $8 #define a5 $9 @@ -82,25 +83,36 @@ #define t1 $13 #define t2 $14 #define t3 $15 -#define s0 $16 /* callee saved */ -#define s1 $17 -#define s2 $18 -#define s3 $19 -#define s4 $20 -#define s5 $21 -#define s6 $22 -#define s7 $23 #define t8 $24 /* caller saved */ #define t9 $25 /* callee address for PIC/temp */ #define jp $25 /* PIC jump register */ -#define k0 $26 /* kernel temporary */ -#define k1 $27 -#define gp $28 /* global pointer - caller saved for PIC */ -#define sp $29 /* stack pointer */ -#define fp $30 /* frame pointer */ -#define s8 $30 /* callee saved */ -#define ra $31 /* return address */ #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +/* + * Register usage specific to the p32 ABI + */ +#define t4 $2 +#define t5 $3 +#define v0 $4 /* return values */ +#define v1 $5 +#define a4 $8 +#define ta0 $8 +#define a5 $9 +#define ta1 $9 +#define a6 $10 +#define ta2 $10 +#define a7 $11 +#define ta3 $11 +#define t0 $12 +#define t1 $13 +#define t2 $14 +#define t3 $15 +#define t8 $24 +#define t9 $25 + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + #endif /* _ASM_REGDEF_H */ diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 23d6b8015c79..9c68330f4037 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h @@ -27,8 +27,14 @@ extern struct mips_abi mips_abi_32; #include #include +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM 
== _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #define __ARCH_HAS_IRIX_SIGACTION +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ + extern int protected_save_fp_context(void __user *sc); extern int protected_restore_fp_context(void __user *sc); diff --git a/arch/mips/include/asm/sim.h b/arch/mips/include/asm/sim.h index 91831800c480..54655bc23488 100644 --- a/arch/mips/include/asm/sim.h +++ b/arch/mips/include/asm/sim.h @@ -25,22 +25,20 @@ __asm__( \ ".type\t__" #symbol ", @function\n\t" \ ".ent\t__" #symbol ", 0\n__" \ #symbol":\n\t" \ - ".frame\t$29, 0, $31\n\t" \ - "sw\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \ - "sw\t$17,"__str(PT_R17)"($29)\n\t" \ - "sw\t$18,"__str(PT_R18)"($29)\n\t" \ - "sw\t$19,"__str(PT_R19)"($29)\n\t" \ - "sw\t$20,"__str(PT_R20)"($29)\n\t" \ - "sw\t$21,"__str(PT_R21)"($29)\n\t" \ - "sw\t$22,"__str(PT_R22)"($29)\n\t" \ - "sw\t$23,"__str(PT_R23)"($29)\n\t" \ - "sw\t$30,"__str(PT_R30)"($29)\n\t" \ + ".frame\t$sp, 0, $ra\n\t" \ + "sw\t$s0,"__str(PT_R16)"($sp)\t\t\t# save_static_function\n\t" \ + "sw\t$s1,"__str(PT_R17)"($sp)\n\t" \ + "sw\t$s2,"__str(PT_R18)"($sp)\n\t" \ + "sw\t$s3,"__str(PT_R19)"($sp)\n\t" \ + "sw\t$s4,"__str(PT_R20)"($sp)\n\t" \ + "sw\t$s5,"__str(PT_R21)"($sp)\n\t" \ + "sw\t$s6,"__str(PT_R22)"($sp)\n\t" \ + "sw\t$s7,"__str(PT_R23)"($sp)\n\t" \ + "sw\t$fp,"__str(PT_R30)"($sp)\n\t" \ "j\t" #symbol "\n\t" \ ".end\t__" #symbol "\n\t" \ ".size\t__" #symbol",. 
- __" #symbol) -#define nabi_no_regargs - #endif /* CONFIG_32BIT */ #ifdef CONFIG_64BIT @@ -53,30 +51,20 @@ __asm__( \ ".type\t__" #symbol ", @function\n\t" \ ".ent\t__" #symbol ", 0\n__" \ #symbol":\n\t" \ - ".frame\t$29, 0, $31\n\t" \ - "sd\t$16,"__str(PT_R16)"($29)\t\t\t# save_static_function\n\t" \ - "sd\t$17,"__str(PT_R17)"($29)\n\t" \ - "sd\t$18,"__str(PT_R18)"($29)\n\t" \ - "sd\t$19,"__str(PT_R19)"($29)\n\t" \ - "sd\t$20,"__str(PT_R20)"($29)\n\t" \ - "sd\t$21,"__str(PT_R21)"($29)\n\t" \ - "sd\t$22,"__str(PT_R22)"($29)\n\t" \ - "sd\t$23,"__str(PT_R23)"($29)\n\t" \ - "sd\t$30,"__str(PT_R30)"($29)\n\t" \ + ".frame\t$sp, 0, $ra\n\t" \ + "sd\t$s0,"__str(PT_R16)"($sp)\t\t\t# save_static_function\n\t" \ + "sd\t$s1,"__str(PT_R17)"($sp)\n\t" \ + "sd\t$s2,"__str(PT_R18)"($sp)\n\t" \ + "sd\t$s3,"__str(PT_R19)"($sp)\n\t" \ + "sd\t$s4,"__str(PT_R20)"($sp)\n\t" \ + "sd\t$s5,"__str(PT_R21)"($sp)\n\t" \ + "sd\t$s6,"__str(PT_R22)"($sp)\n\t" \ + "sd\t$s7,"__str(PT_R23)"($sp)\n\t" \ + "sd\t$fp,"__str(PT_R30)"($sp)\n\t" \ "j\t" #symbol "\n\t" \ ".end\t__" #symbol "\n\t" \ ".size\t__" #symbol",. - __" #symbol) -#define nabi_no_regargs \ - unsigned long __dummy0, \ - unsigned long __dummy1, \ - unsigned long __dummy2, \ - unsigned long __dummy3, \ - unsigned long __dummy4, \ - unsigned long __dummy5, \ - unsigned long __dummy6, \ - unsigned long __dummy7, - #endif /* CONFIG_64BIT */ #endif /* _ASM_SIM_H */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 1416c2808111..421c57bac64f 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -17,6 +17,7 @@ #include #include #include +#include #include /* Make the addition of cfi info a little easier. 
*/ @@ -123,7 +124,9 @@ #elif !defined(CONFIG_CPU_MIPSR6) mfhi v1 #endif -#ifdef CONFIG_32BIT +#if _MIPS_SIM == _MIPS_SIM_PABI32 + cfi_st $3, PT_R3, \docfi +#elif defined(CONFIG_32BIT) cfi_st $8, PT_R8, \docfi cfi_st $9, PT_R9, \docfi #endif @@ -188,7 +191,12 @@ .macro set_saved_ti ti temp ASM_CPUID_MFC0 \temp, ASM_SMP_CPUID_REG LONG_SRL \temp, SMP_CPUID_PTRSHIFT +#ifdef __nanomips__ + addiu \temp, \temp, thread_info_ptr + LONG_S \ti, 0(\temp) +#else LONG_S \ti, thread_info_ptr(\temp) +#endif .endm #else /* !CONFIG_SMP */ .macro get_saved_ti out temp /* Uniprocessor variation */ @@ -269,25 +277,27 @@ .endif cfi_st k0, PT_R29, \docfi cfi_rel_offset sp, PT_R29, \docfi +#if _MIPS_SIM != _MIPS_SIM_PABI32 cfi_st v1, PT_R3, \docfi +#endif /* * You might think that you don't need to save $0, * but the FPU emulator and gdb remote debug stub * need it to operate correctly */ LONG_S $0, PT_R0(sp) - mfc0 v1, CP0_STATUS - cfi_st v0, PT_R2, \docfi - LONG_S v1, PT_STATUS(sp) + mfc0 k0, CP0_STATUS + cfi_st $2, PT_R2, \docfi + LONG_S k0, PT_STATUS(sp) cfi_st $4, PT_R4, \docfi - mfc0 v1, CP0_CAUSE + mfc0 k0, CP0_CAUSE cfi_st $5, PT_R5, \docfi - LONG_S v1, PT_CAUSE(sp) + LONG_S k0, PT_CAUSE(sp) cfi_st $6, PT_R6, \docfi cfi_st ra, PT_R31, \docfi MFC0 ra, CP0_EPC cfi_st $7, PT_R7, \docfi -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || (_MIPS_SIM == _MIPS_SIM_PABI32) cfi_st $8, PT_R8, \docfi cfi_st $9, PT_R9, \docfi #endif @@ -340,7 +350,9 @@ LONG_L $24, PT_HI(sp) mthi $24 #endif -#ifdef CONFIG_32BIT +#if _MIPS_SIM == _MIPS_SIM_PABI32 + cfi_ld $3, PT_R3, \docfi +#elif defined(CONFIG_32BIT) cfi_ld $8, PT_R8, \docfi cfi_ld $9, PT_R9, \docfi #endif @@ -408,6 +420,40 @@ .set pop .endm +#elif _MIPS_SIM == _MIPS_SIM_PABI32 + + .macro RESTORE_SOME docfi=0 + .set push + .set reorder + .set noat + LONG_L a2, PT_STATUS(sp) + mfc0 a0, CP0_STATUS + li a1, ST0_CU1 | ST0_FR | ST0_IM + and a0, a0, a1 + or a2, a2, a1 + xor a2, a2, a1 + LONG_L a1, PT_EPC(sp) + or a2, a0 + mtc0 a2, CP0_STATUS + MTC0 
a1, CP0_EPC + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi + cfi_ld $9, PT_R9, \docfi + cfi_ld $8, PT_R8, \docfi + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $2, PT_R2, \docfi + .set pop + .endm + + .macro RESTORE_SP_AND_RET docfi=0 + RESTORE_SP \docfi + eretnc + .endm + #else .macro RESTORE_SOME docfi=0 .set push diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 8ad25c25b4f8..ace14adf5ad3 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -9,15 +9,18 @@ #ifdef CONFIG_KALLSYMS extern int raw_show_trace; extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, - unsigned long pc, unsigned long *ra); + unsigned long *fp, unsigned long pc, + unsigned long *ra); extern unsigned long unwind_stack_by_address(unsigned long stack_page, unsigned long *sp, + unsigned long *fp, unsigned long pc, unsigned long *ra); #else #define raw_show_trace 1 static inline unsigned long unwind_stack(struct task_struct *task, - unsigned long *sp, unsigned long pc, unsigned long *ra) + unsigned long *sp, unsigned long *fp, unsigned long pc, + unsigned long *ra) { return 0; } @@ -28,8 +31,15 @@ static inline unsigned long unwind_stack(struct task_struct *task, #define STR_LONG_L __stringify(LONG_L) #define STR_LONGSIZE __stringify(LONGSIZE) +#ifdef __nanomips__ +#define INLINE_ASM_REG_PREFIX "$r" +#else +#define INLINE_ASM_REG_PREFIX "$" +#endif + #define STORE_ONE_REG(r) \ - STR_LONG_S " $" __stringify(r)",("STR_LONGSIZE"*"__stringify(r)")(%1)\n\t" + STR_LONG_S " " INLINE_ASM_REG_PREFIX __stringify(r) ",(" \ + STR_LONGSIZE "*" __stringify(r) ")(%1)\n\t" static __always_inline void prepare_frametrace(struct pt_regs *regs) { @@ -43,11 +53,11 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) __asm__ __volatile__( ".set push\n\t" ".set noat\n\t" - /* Store $1 so 
we can use it */ - STR_LONG_S " $1,"STR_LONGSIZE"(%1)\n\t" + /* Store $at so we can use it */ + STR_LONG_S " $at,"STR_LONGSIZE"(%1)\n\t" /* Store the PC */ - "1: " STR_PTR_LA " $1, 1b\n\t" - STR_LONG_S " $1,%0\n\t" + "1: " STR_PTR_LA " $at, 1b\n\t" + STR_LONG_S " $at,%0\n\t" STORE_ONE_REG(2) STORE_ONE_REG(3) STORE_ONE_REG(4) @@ -78,8 +88,8 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) STORE_ONE_REG(29) STORE_ONE_REG(30) STORE_ONE_REG(31) - /* Restore $1 */ - STR_LONG_L " $1,"STR_LONGSIZE"(%1)\n\t" + /* Restore $at */ + STR_LONG_L " $at,"STR_LONGSIZE"(%1)\n\t" ".set pop\n\t" : "=m" (regs->cp0_epc) : "r" (regs->regs) diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h index 29030cb398ee..f7c19514dd42 100644 --- a/arch/mips/include/asm/string.h +++ b/arch/mips/include/asm/string.h @@ -15,7 +15,7 @@ * Most of the inline functions are rather naive implementations so I just * didn't bother updating them for 64-bit ... */ -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_NANOMIPS) #ifndef IN_STRING_C diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 446cf3e80276..04beaace177e 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -17,6 +17,7 @@ #include #include #include +#include struct task_struct; @@ -129,6 +130,7 @@ do { \ if (cpu_has_userlocal) \ write_c0_userlocal(task_thread_info(next)->tp_value); \ __restore_watch(next); \ + mips_mt_randomize_sched_policy(); \ (last) = resume(prev, next); \ } while (0) diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 0170602a1e4e..1b4d29285e62 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -30,7 +30,8 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task, struct pt_regs *regs) { /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ - return (IS_ENABLED(CONFIG_32BIT) || + return 
!IS_ENABLED(CONFIG_CPU_NANOMIPS) && + (IS_ENABLED(CONFIG_32BIT) || test_tsk_thread_flag(task, TIF_32BIT_REGS)) && (regs->regs[2] == __NR_syscall); } @@ -65,12 +66,10 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, return 0; -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && !defined(CONFIG_CPU_NANOMIPS) case 4: case 5: case 6: case 7: return get_user(*arg, (int *)usp + n); -#endif - -#ifdef CONFIG_64BIT +#else case 4: case 5: case 6: case 7: #ifdef CONFIG_MIPS32_O32 if (test_thread_flag(TIF_32BIT_REGS)) @@ -92,7 +91,11 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, static inline long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) { +#ifdef CONFIG_CPU_NANOMIPS + return regs->regs[4]; +#else return regs->regs[2]; +#endif } static inline void syscall_rollback(struct task_struct *task, @@ -105,6 +108,12 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { +#ifdef CONFIG_CPU_NANOMIPS + if (error) + regs->regs[4] = error; + else + regs->regs[4] = val; +#else if (error) { regs->regs[2] = -error; regs->regs[7] = 1; @@ -112,6 +121,7 @@ static inline void syscall_set_return_value(struct task_struct *task, regs->regs[2] = val; regs->regs[7] = 0; } +#endif } static inline void syscall_get_arguments(struct task_struct *task, diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 461b819ce6b7..bfc34861bfb6 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -34,6 +34,7 @@ static inline int arch_within_stack_frames(const void *const stack, extern unsigned long unwind_stack_by_address( unsigned long stack_page, unsigned long *sp, + unsigned long *fp, unsigned long pc, unsigned long *ra); unsigned long sp, lastsp, ra, pc; @@ -52,7 +53,7 @@ static inline int arch_within_stack_frames(const void *const stack, * copy_{to,from}_user() (inlined into do_usercopy_stack) */ 
for (skip_frames = 0; skip_frames < 2; skip_frames++) { - pc = unwind_stack_by_address((unsigned long)stack, &sp, pc, &ra); + pc = unwind_stack_by_address((unsigned long)stack, &sp, NULL, pc, &ra); if (!pc) return BAD_STACK; } @@ -72,7 +73,7 @@ static inline int arch_within_stack_frames(const void *const stack, */ do { lastsp = sp; - pc = unwind_stack_by_address((unsigned long)stack, &sp, pc, &ra); + pc = unwind_stack_by_address((unsigned long)stack, &sp, NULL, pc, &ra); if ((((unsigned long)obj) >= lastsp) && (((unsigned long)obj + len) <= (sp - sizeof(void *)))) { /* obj is entirely within this stack frame */ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 20a4da5bc344..306b6b6499df 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -32,10 +32,6 @@ #endif #define __UA_ADDR ".word" -#define __UA_LA "la" -#define __UA_ADDU "addu" -#define __UA_t0 "$8" -#define __UA_t1 "$9" #endif /* CONFIG_32BIT */ @@ -46,10 +42,6 @@ extern u64 __ua_limit; #define __UA_LIMIT __ua_limit #define __UA_ADDR ".dword" -#define __UA_LA "dla" -#define __UA_ADDU "daddu" -#define __UA_t0 "$12" -#define __UA_t1 "$13" #endif /* CONFIG_64BIT */ @@ -86,6 +78,20 @@ static inline bool eva_kernel_access(void) return uaccess_kernel(); } +/** + * eva_user_access() - determine whether access should use EVA instructions + * + * Determines whether memory accesses should be performed using EVA memory + * access instructions - that is, whether to access the user address space on + * an EVA system. + * + * Return: true if user memory access on an EVA system, else false + */ +static inline bool eva_user_access(void) +{ + return IS_ENABLED(CONFIG_EVA) && !eva_kernel_access(); +} + /* * Is a address valid? This does a straightforward calculation rather * than tests. 
@@ -331,7 +337,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ - " move %1, $0 \n" \ + " move %1, $zero \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -360,8 +366,8 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ - " move %1, $0 \n" \ - " move %D1, $0 \n" \ + " move %1, $zero \n" \ + " move %D1, $zero \n" \ " j 3b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -502,131 +508,31 @@ do { \ extern void __put_user_unknown(void); -/* - * We're generating jump to subroutines which will be outside the range of - * jump instructions - */ -#ifdef MODULE -#define __MODULE_JAL(destination) \ - ".set\tnoat\n\t" \ - __UA_LA "\t$1, " #destination "\n\t" \ - "jalr\t$1\n\t" \ - ".set\tat\n\t" -#else -#define __MODULE_JAL(destination) \ - "jal\t" #destination "\n\t" -#endif - -#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ - defined(CONFIG_CPU_HAS_PREFETCH)) -#define DADDI_SCRATCH "$3" -#else -#define DADDI_SCRATCH "$0" -#endif - -extern size_t __copy_user(void *__to, const void *__from, size_t __n); - -#define __invoke_copy_from(func, to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to(func, to, from, n) \ -({ \ - register void __user *__cu_to_r __asm__("$4"); \ - register const void *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = 
(from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#ifndef CONFIG_EVA -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. 
We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -#endif /* CONFIG_EVA */ +extern size_t __copy_user(void *to, const void *from, size_t n, + const void *from_end); +extern size_t __copy_from_user_eva(void *to, const void *from, size_t n, + const void *from_end); +extern size_t __copy_to_user_eva(void *to, const void *from, size_t n, + const void *from_end); +extern size_t __copy_in_user_eva(void *to, const void *from, size_t n, + const void *from_end); static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_to_kernel(to, from, n); + if (eva_user_access()) + return __copy_to_user_eva(to, from, n, from + n); else - return __invoke_copy_to_user(to, from, n); + return __copy_user(to, from, n, from + n); } static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_from_kernel(to, from, n); + if (eva_user_access()) + return __copy_from_user_eva(to, from, n, from + n); else - return __invoke_copy_from_user(to, from, n); + return __copy_user(to, from, n, from + n); } #define INLINE_COPY_FROM_USER @@ -635,14 +541,14 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return ___invoke_copy_in_kernel(to, from, n); + if (eva_user_access()) + return __copy_in_user_eva(to, from, n, from + n); else - return ___invoke_copy_in_user(to, from, n); + return __copy_user(to, from, n, from + n); } -extern __kernel_size_t __bzero_kernel(void __user *addr, 
__kernel_size_t size); -extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); +extern __kernel_size_t __bzero_kernel(void __user *addr, int val, __kernel_size_t size); +extern __kernel_size_t __bzero(void __user *addr, int val, __kernel_size_t size); /* * __clear_user: - Zero a block of memory in user space, with less checking. @@ -658,32 +564,11 @@ extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); static inline __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size) { - __kernel_size_t res; - - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero_kernel) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); - } - - return res; + if (eva_kernel_access()) + return __bzero_kernel(addr, 0, size); + + might_fault(); + return __bzero(addr, 0, size); } #define clear_user(addr,n) \ @@ -720,32 +605,11 @@ extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long static inline long strncpy_from_user(char *__to, const char __user *__from, long __len) { - long res; - - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, 
"$31", "memory"); - } - - return res; + if (eva_kernel_access()) + return __strncpy_from_kernel_asm(__to, __from, __len); + + might_fault(); + return __strncpy_from_user_asm(__to, __from, __len); } extern long __strnlen_kernel_asm(const char __user *s, long n); @@ -766,30 +630,12 @@ extern long __strnlen_user_asm(const char __user *s, long n); */ static inline long strnlen_user(const char __user *s, long n) { - long res; - might_fault(); - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } else { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } - - return res; + + if (eva_kernel_access()) + return __strnlen_kernel_asm(s, n); + + return __strnlen_user_asm(s, n); } #endif /* _ASM_UACCESS_H */ diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 3c09450908aa..fe95c0d1579a 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -9,11 +9,16 @@ * Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto * the stack. 
Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A */ -#ifndef _ASM_UNISTD_H +/* Enable inclusion twice when defining syscall table */ +#if !defined(_ASM_UNISTD_H) || defined(__SYSCALL) #define _ASM_UNISTD_H #include +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #ifdef CONFIG_MIPS32_N32 #define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls) #elif defined(CONFIG_64BIT) @@ -25,6 +30,7 @@ #ifndef __ASSEMBLY__ #define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC @@ -67,4 +73,13 @@ #endif /* !__ASSEMBLY__ */ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ + +#if (_MIPS_SIM == _MIPS_SIM_PABI32) + +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_MMAP_4KOFF + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + #endif /* _ASM_UNISTD_H */ diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h index b86d1ae07125..359a77296cc6 100644 --- a/arch/mips/include/asm/uprobes.h +++ b/arch/mips/include/asm/uprobes.h @@ -11,31 +11,48 @@ #include #include +#include /* * We want this to be defined as union mips_instruction but that makes the * generic code blow up. */ -typedef u32 uprobe_opcode_t; +#ifdef __nanomips__ +typedef struct { u16 h[3]; } uprobe_opcode_t; -/* - * Classic MIPS (note this implementation doesn't consider microMIPS yet) - * instructions are always 4 bytes but in order to deal with branches and - * their delay slots, we treat instructions as having 8 bytes maximum. - */ -#define MAX_UINSN_BYTES 8 -#define UPROBE_XOL_SLOT_BYTES 128 /* Max. 
cache line size */ +static inline bool uprobe_opcode_equal(uprobe_opcode_t a, uprobe_opcode_t b) +{ + unsigned int i; + + for (i = 0; i < (nanomips_insn_len(a.h[0]) / 2); i++) { + if (a.h[i] != b.h[i]) + return false; + } -#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */ -#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */ + return true; +} +# define uprobe_opcode_equal uprobe_opcode_equal -#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE -#define UPROBE_SWBP_INSN_SIZE 4 +# define UPROBE_MAX_XOL_INSNS 1 + +# define UPROBE_XOLBREAK_INSN ((uprobe_opcode_t){{ 0x1014 }}) /* break 4 */ +# define UPROBE_SWBP_INSN ((uprobe_opcode_t){{ 0x1013 }}) /* break 3 */ +# define UPROBE_SWBP_INSN_SIZE 2 +#else +typedef u32 uprobe_opcode_t; +# define UPROBE_MAX_XOL_INSNS 2 + +# define UPROBE_XOLBREAK_INSN 0x0004000d /* break 4 */ +# define UPROBE_SWBP_INSN 0x0003000d /* break 3 */ +# define UPROBE_SWBP_INSN_SIZE 4 +#endif + +#define UPROBE_XOL_SLOT_BYTES 128 /* Max. cache line size */ struct arch_uprobe { unsigned long resume_epc; - u32 insn[2]; - u32 ixol[2]; + uprobe_opcode_t insn[UPROBE_MAX_XOL_INSNS]; + uprobe_opcode_t ixol[2]; }; struct arch_uprobe_task { diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h index 10380b1bc601..9cde8fcad472 100644 --- a/arch/mips/include/uapi/asm/break.h +++ b/arch/mips/include/uapi/asm/break.h @@ -17,13 +17,13 @@ * non-Linux/MIPS object files or make use of them in the future. 
*/ #define BRK_USERBP 0 /* User bp (used by debuggers) */ +#define BRK_UPROBE 3 /* See */ +#define BRK_UPROBE_XOL 4 /* See */ #define BRK_SSTEPBP 5 /* User bp (used by debuggers) */ #define BRK_OVERFLOW 6 /* Overflow check */ #define BRK_DIVZERO 7 /* Divide by zero check */ #define BRK_RANGE 8 /* Range error check */ #define BRK_BUG 12 /* Used by BUG() */ -#define BRK_UPROBE 13 /* See */ -#define BRK_UPROBE_XOL 14 /* See */ #define BRK_MEMU 514 /* Used by FPU emulator */ #define BRK_KPROBE_BP 515 /* Kprobe break */ #define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */ diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h index 2fb714e2d6d8..a6c6f898dde6 100644 --- a/arch/mips/include/uapi/asm/errno.h +++ b/arch/mips/include/uapi/asm/errno.h @@ -9,10 +9,27 @@ #ifndef _UAPI_ASM_ERRNO_H #define _UAPI_ASM_ERRNO_H +#include + +/* + * P32 uses the standard generic error numbering, contrary to the older ABIs + * below. + */ + +#if (_MIPS_SIM == _MIPS_SIM_PABI32) + +#include + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + /* * These error numbers are intended to be MIPS ABI compatible */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #include #define ENOMSG 35 /* No message of desired type */ @@ -126,5 +143,6 @@ #define EDQUOT 1133 /* Quota exceeded */ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ #endif /* _UAPI_ASM_ERRNO_H */ diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h index 42e13dead543..83a62849f682 100644 --- a/arch/mips/include/uapi/asm/fcntl.h +++ b/arch/mips/include/uapi/asm/fcntl.h @@ -9,6 +9,10 @@ #ifndef _UAPI_ASM_FCNTL_H #define _UAPI_ASM_FCNTL_H +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #include #define O_APPEND 0x0008 @@ -75,6 +79,8 @@ struct flock { #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 
*/ +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32, _MIPS_SIM_NABI32, or _MIPS_SIM_ABI64 */ + #include #endif /* _UAPI_ASM_FCNTL_H */ diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index f3c025445e45..e2bb4b862b64 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h @@ -70,7 +70,8 @@ struct pt_regs { /* Read and write watchpoint registers. */ enum pt_watch_style { pt_watch_style_mips32, - pt_watch_style_mips64 + pt_watch_style_mips64, + pt_watch_style_i7200, }; struct mips32_watch_regs { unsigned int watchlo[8]; diff --git a/arch/mips/include/uapi/asm/resource.h b/arch/mips/include/uapi/asm/resource.h index 372ff8f4bc06..731d5ca8f585 100644 --- a/arch/mips/include/uapi/asm/resource.h +++ b/arch/mips/include/uapi/asm/resource.h @@ -10,6 +10,11 @@ #ifndef _ASM_RESOURCE_H #define _ASM_RESOURCE_H +#include + +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) /* * These five resource limit IDs have a MIPS/Linux-specific ordering, @@ -31,6 +36,8 @@ # define RLIM_INFINITY 0x7fffffffUL #endif +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ + #include #endif /* _ASM_RESOURCE_H */ diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h index 26143e3b7c26..56dc5838f9e8 100644 --- a/arch/mips/include/uapi/asm/sgidefs.h +++ b/arch/mips/include/uapi/asm/sgidefs.h @@ -41,5 +41,6 @@ #define _MIPS_SIM_ABI32 1 #define _MIPS_SIM_NABI32 2 #define _MIPS_SIM_ABI64 3 +#define _MIPS_SIM_PABI32 5 #endif /* __ASM_SGIDEFS_H */ diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h index d0a540e88bb4..bbfc4aa80070 100644 --- a/arch/mips/include/uapi/asm/sigcontext.h +++ b/arch/mips/include/uapi/asm/sigcontext.h @@ -88,4 +88,21 @@ struct sigcontext { #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ +#if _MIPS_SIM == _MIPS_SIM_PABI32 
+ +/* + * nanoMIPS makes the sigcontext a bit more minimal, with all optional ASE + * context (DSP, FPU and MSA) represented as extended context entries. + */ +struct sigcontext { + __u64 sc_regs[32]; + __u64 sc_pc; + __u32 sc_used_math; + __u32 sc_reserved; +}; + +#define __MIPS_REDUCED_SIGCONTEXT + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + #endif /* _UAPI_ASM_SIGCONTEXT_H */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index f17d8163dec6..2d9039045d0d 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -10,27 +10,35 @@ #ifndef _UAPI_ASM_SIGINFO_H #define _UAPI_ASM_SIGINFO_H - -#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int)) +/* FIXME enable this for P32? */ #undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #define HAVE_ARCH_SIGINFO_T +#define __ARCH_SIGSYS + +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ /* * Careful to keep union _sifields from shifting ... */ -#if _MIPS_SZLONG == 32 -#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) -#elif _MIPS_SZLONG == 64 +#if _MIPS_SZLONG == 64 #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#else +#elif _MIPS_SZLONG != 32 #error _MIPS_SZLONG neither 32 nor 64 #endif -#define __ARCH_SIGSYS - #include +/* + * nanoMIPS p32 ABI uses the generic struct siginfo defined in + * asm-generic/siginfo.h. 
+ */ +#ifdef HAVE_ARCH_SIGINFO_T + /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ typedef struct siginfo { int si_signo; @@ -124,4 +132,6 @@ typedef struct siginfo { #define SI_TIMER -3 /* sent by timer expiration */ #define SI_MESGQ -4 /* sent by real time mesq state change */ +#endif /* HAVE_ARCH_SIGINFO_T */ + #endif /* _UAPI_ASM_SIGINFO_H */ diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index 53104b10aae2..5add30444001 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -10,9 +10,20 @@ #ifndef _UAPI_ASM_SIGNAL_H #define _UAPI_ASM_SIGNAL_H +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +#include + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #include #define _NSIG 128 + #define _NSIG_BPW (sizeof(unsigned long) * 8) #define _NSIG_WORDS (_NSIG / _NSIG_BPW) @@ -116,5 +127,6 @@ typedef struct sigaltstack { int ss_flags; } stack_t; +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ #endif /* _UAPI_ASM_SIGNAL_H */ diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h index 95416f366d7f..0073e54b6fcd 100644 --- a/arch/mips/include/uapi/asm/stat.h +++ b/arch/mips/include/uapi/asm/stat.h @@ -85,6 +85,8 @@ struct stat64 { long long st_blocks; }; +#define STAT_HAVE_NSEC 1 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -126,8 +128,15 @@ struct stat { unsigned long st_blocks; }; +#define STAT_HAVE_NSEC 1 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ -#define STAT_HAVE_NSEC 1 +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +/* Use the generic struct stat */ +#include + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ #endif /* _ASM_STAT_H */ diff --git a/arch/mips/include/uapi/asm/statfs.h b/arch/mips/include/uapi/asm/statfs.h index f4174dcaef5e..313f9ecd7b16 100644 --- 
a/arch/mips/include/uapi/asm/statfs.h +++ b/arch/mips/include/uapi/asm/statfs.h @@ -9,6 +9,10 @@ #ifndef _ASM_STATFS_H #define _ASM_STATFS_H +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + #include #include @@ -38,6 +42,8 @@ struct statfs { long f_spare[5]; }; +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32, _MIPS_SIM_NABI32, or _MIPS_SIM_ABI64 */ + #if (_MIPS_SIM == _MIPS_SIM_ABI32) || (_MIPS_SIM == _MIPS_SIM_NABI32) /* @@ -98,4 +104,11 @@ struct compat_statfs64 { #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +/* p32 uses the generic statfs structs */ +#include + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + #endif /* _ASM_STATFS_H */ diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h index 2d3bf8eebf1f..1c6a078ebabe 100644 --- a/arch/mips/include/uapi/asm/ucontext.h +++ b/arch/mips/include/uapi/asm/ucontext.h @@ -2,6 +2,8 @@ #ifndef __MIPS_UAPI_ASM_UCONTEXT_H #define __MIPS_UAPI_ASM_UCONTEXT_H +#include + /** * struct extcontext - extended context header structure * @magic: magic value identifying the type of extended context @@ -31,15 +33,106 @@ struct extcontext { * If MSA context is live for a task at the time a signal is delivered to it, * this structure will hold the MSA context of the task as it was prior to the * signal delivery. + * + * On nanoMIPS ABIs the most significant 64 bits of the MSA vector registers can + * be found in the fpu_extcontext entry with the rest of the vector register + * state. Use the FPU_EXTCTXT_RA() macro to abstract endianness differences. 
*/ struct msa_extcontext { struct extcontext ext; #define MSA_EXTCONTEXT_MAGIC 0x784d5341 /* xMSA */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) unsigned long long wr[32]; +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ unsigned int csr; }; +#if 0 /* TODO implement nanoMIPS FP/MSA extcontext support */ +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +/** + * struct dsp_extcontext - nanoMIPS DSP extended context structure + * @ext: the extended context header, with magic == DSP_EXTCONTEXT_MAGIC + * @hi: the values of the hi accumulators + * @lo: the values of the lo accumulators + * + * If DSP context is live for a task at the time a signal is delivered to it, + * this structure will hold the DSP context of the task as it was prior to the + * signal delivery. + */ +struct dsp_extcontext { + struct extcontext ext; +# define DSP_EXTCONTEXT_MAGIC 0x78445350 /* xDSP */ + + unsigned long hi[4]; + unsigned long lo[4]; + __u32 dsp; +}; + +/** + * struct fpu_extcontext - nanoMIPS FPU / Vector extended context structure + * @ext: the extended context header, with magic == FPU_EXTCONTEXT_MAGIC + * @fcsr: the value of the FPU context & status register + * @width: width of each register in bytes, always a power of two + * @r: the FPU registers, each one a full native-endian wide value. + * use FPU_EXTCTXT_RA() macro after validating @width to get the + * address of an element of a register, or an FPU register since it + * may not be at the low address. + * + * If FPU or vector context is live for a task at the time a signal is delivered + * to it, this structure will hold the FPU context of the task as it was prior + * to the signal delivery, including any vector register state which aliases the + * FPU registers. 
+ */ +struct fpu_extcontext { + struct extcontext ext; +# define FPU_EXTCONTEXT_MAGIC 0x78465055 /* xFPU */ + + unsigned int fcsr; + unsigned int width; + + unsigned long long r[0]; +}; + +# ifdef __MIPSEL__ +# define FPU_EXTCTXT_OFFSET(idx, width, type) ((idx) * (size)) +# endif /* __MIPSEL__ */ + +# ifdef __MIPSEB__ +# define FPU_EXTCTXT_OFFSET(idx, width, type) (((idx) * (size)) ^ \ + ((width) - (size))) +# endif /* __MIPSEB__ */ + +/** + * FPU_EXTCTX_RA() - Get address of vector reg element in fpu_extcontext + * @fpuec: pointer to struct fpu_extcontext entry + * @reg: FPU or vector register number + * @type: type of element to return address of + * @idx: index of element within vector register (0 for FPU register) + * + * Abstract away endianness differences to find a pointer to a single element in + * a vector register. Due to the little endian aliasing of different sized + * elements in the MSA vector registers, it is unsafe to simply index the + * resulting pointer, as that will do the wrong thing on big endian CPUs. + * + * E.g.: + * float f24_sgl = *FPU_EXTCTXT_RA(&fpuec, 24, float, 0) + * double f30_dbl = *FPU_EXTCTXT_RA(&fpuec, 30, double, 0) + * u64 w4_hi = *FPU_EXTCTXT_RA(&fpuec, 4, u64, 1) + */ +# define FPU_EXTCTXT_RA(fpuec, reg, type, idx) ({ \ + struct fpu_extcontext *_fpuec = (fpuec); \ + unsigned long _size = sizeof(type); \ + (type *)(unsigned long)_fpuec->r + (reg) * _fpuec->width \ + + FPU_EXTCTX_OFFSET(idx, _fpuec->width, _size); \ +}) + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ +#endif + #define END_EXTCONTEXT_MAGIC 0x78454e44 /* xEND */ /** diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index bb05e9916a5f..5821917ad321 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -10,7 +10,8 @@ * Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto * the stack. 
Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A */ -#ifndef _UAPI_ASM_UNISTD_H +/* Enable inclusion twice when defining syscall table */ +#if !defined(_UAPI_ASM_UNISTD_H) || defined(__SYSCALL) #define _UAPI_ASM_UNISTD_H #include @@ -1092,4 +1093,23 @@ #define __NR_N32_Linux 6000 #define __NR_N32_Linux_syscalls 330 +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +#define sys_mmap2 sys_mmap_4koff + +/* Rearrange arguments to avoid register hole */ +#define __ARCH_WANT_SYNC_FILE_RANGE2 +#define __ARCH_WANT_SYS_FADVISE64_64_2 + +/* P32 has completely separate syscall numbering */ +#include + +/* architecture specific syscalls. */ +#define __NR_set_thread_area (__NR_arch_specific_syscall + 0) +__SYSCALL(__NR_set_thread_area, sys_set_thread_area) + +#define NR_syscalls __NR_syscalls + +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ + #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index b41486b14598..aff85739d9d9 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -7,8 +7,8 @@ extra-y := head.o vmlinux.lds obj-y += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ process.o prom.o ptrace.o reset.o setup.o signal.o \ - syscall.o time.o topology.o traps.o unaligned.o watch.o \ - vdso.o cacheinfo.o + time.o topology.o traps.o unaligned.o watch.o vdso.o \ + cacheinfo.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg @@ -42,9 +42,8 @@ sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o obj-y += $(sw-y) +obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o -obj-$(CONFIG_CPU_R3000) += r2300_fpu.o -obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o @@ -71,8 +70,14 @@ obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o obj-$(CONFIG_KPROBES) += kprobes.o +ifneq ($(CONFIG_CPU_NANOMIPS),y) obj-$(CONFIG_32BIT) += scall32-o32.o obj-$(CONFIG_64BIT) += 
scall64-64.o +obj-y += syscall.o +else +obj-$(CONFIG_32BIT) += scall32-p32.o +obj-y += syscall-nanomips.o +endif obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index f2b4d5bafd27..66d0a1936ab8 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -100,6 +100,7 @@ void output_thread_info_defines(void) OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); + DEFINE(_THREAD_MASK_BITS, PAGE_SHIFT + THREAD_SIZE_ORDER); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); DEFINE(_IRQ_STACK_START, IRQ_STACK_START); BLANK(); @@ -209,9 +210,25 @@ void output_mm_defines(void) DEFINE(_PAGE_SHIFT, PAGE_SHIFT); DEFINE(_PAGE_SIZE, PAGE_SIZE); BLANK(); + DEFINE(__PAGE_GLOBAL_SHIFT, _PAGE_GLOBAL_SHIFT); +#if defined(CONFIG_XPA) || defined(CONFIG_CPU_HAS_RIXI) + DEFINE(__PAGE_NO_EXEC_SHIFT, _PAGE_NO_EXEC_SHIFT); +#endif + BLANK(); + DEFINE(__PAGE_ACCESSED, _PAGE_ACCESSED); + DEFINE(__PAGE_MODIFIED, _PAGE_MODIFIED); + DEFINE(__PAGE_VALID, _PAGE_VALID); + DEFINE(__PAGE_DIRTY, _PAGE_DIRTY); + DEFINE(__PAGE_PRESENT, _PAGE_PRESENT); + DEFINE(__PAGE_WRITE, _PAGE_WRITE); + BLANK(); } -#ifdef CONFIG_32BIT +#if (_MIPS_SIM == _MIPS_SIM_ABI32) || \ + (_MIPS_SIM == _MIPS_SIM_NABI32) || \ + (_MIPS_SIM == _MIPS_SIM_ABI64) + +#if defined(CONFIG_32BIT) void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); @@ -233,7 +250,7 @@ void output_sc_defines(void) } #endif -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); @@ -247,6 +264,20 @@ void output_sc_defines(void) } #endif +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 or _MIPS_SIM_NABI32 or _MIPS_SIM_ABI64 */ + +#if _MIPS_SIM == _MIPS_SIM_PABI32 + +void output_sc_defines(void) 
+{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + OFFSET(SC_PC, sigcontext, sc_pc); + BLANK(); +} + +#endif /* _MIPS_SIM == _MIPS_SIM_PABI32 */ + void output_signal_defined(void) { COMMENT("Linux signal numbers."); @@ -257,7 +288,9 @@ void output_signal_defined(void) DEFINE(_SIGTRAP, SIGTRAP); DEFINE(_SIGIOT, SIGIOT); DEFINE(_SIGABRT, SIGABRT); +#ifdef SIGEMT DEFINE(_SIGEMT, SIGEMT); +#endif DEFINE(_SIGFPE, SIGFPE); DEFINE(_SIGKILL, SIGKILL); DEFINE(_SIGBUS, SIGBUS); diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index b79ed9af9886..256cce81ed92 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -393,6 +393,22 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) return 0; } +/* + * Compute return address in nanoMIPS after an exception only. + * nanoMIPS has no branch delay slots so there is no need to emulate branches, + * we only need to jump over the current instruction. + */ +int __nanoMIPS_compute_return_epc(struct pt_regs *regs) +{ + u16 __user *addr; + u16 opcode; + + addr = (u16 __user *)msk_isa16_mode(regs->cp0_epc); + __get_user(opcode, addr); + regs->cp0_epc += nanomips_insn_len(opcode); + return 0; +} + /** * __compute_return_epc_for_insn - Computes the return address and do emulate * branch simulation, if required. 
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 7e5a8ece993a..ab61dc1a52c4 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -27,13 +27,14 @@ #define CM3_GCR_Cx_ID_CORENUM_SHF 0 #define CM3_GCR_Cx_ID_CORENUM_MSK (0xff << 0) +#define CPC_CL_CMD_OFS 0x2000 +#define CPC_CL_CMD_PWRDOWN 0x2 + #define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 .extern mips_cm_base -.set noreorder - #ifdef CONFIG_64BIT # define STATUS_BITDEPS ST0_KX #else @@ -45,7 +46,6 @@ #define DUMP_EXCEP(name) \ PTR_LA a0, 8f; \ jal mips_cps_bev_dump; \ - nop; \ TEXT(name) #else /* !CONFIG_MIPS_CPS_NS16550 */ @@ -61,12 +61,11 @@ .macro has_mt dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 2 + mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 3 + mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt - nop .endm /* @@ -77,16 +76,15 @@ .macro has_vp dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 2 + mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 3 + mfc0 \dest, CP0_CONFIG, 3 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 4 + mfc0 \dest, CP0_CONFIG, 4 bgez \dest, \nomt - mfc0 \dest, CP0_CONFIG, 5 + mfc0 \dest, CP0_CONFIG, 5 andi \dest, \dest, MIPS_CONF5_VP beqz \dest, \nomt - nop .endm /* Calculate an uncached address for the CM GCRs */ @@ -114,12 +112,10 @@ LEAF(mips_cps_core_entry) mfc0 k0, CP0_STATUS and k0, k0, ST0_NMI beqz k0, not_nmi - nop /* This is an NMI */ PTR_LA k0, nmi_handler jr k0 - nop not_nmi: /* Setup Cause */ @@ -131,18 +127,16 @@ not_nmi: mtc0 t0, CP0_STATUS /* Skip cache & coherence setup if we're already coherent */ - cmgcrb v1 - lw s7, GCR_CL_COHERENCE_OFS(v1) + cmgcrb t8 + lw s7, GCR_CL_COHERENCE_OFS(t8) bnez s7, 1f - nop /* Initialize the L1 caches */ jal mips_cps_cache_init - nop /* Enter the coherent domain */ li t0, 0xff - sw t0, GCR_CL_COHERENCE_OFS(v1) + sw t0, 
GCR_CL_COHERENCE_OFS(t8) ehb /* Set Kseg0 CCA to that in s0 */ @@ -156,7 +150,6 @@ not_nmi: /* Jump to kseg0 */ PTR_LA t0, 1f jr t0 - nop /* * We're up, cached & coherent. Perform any EVA initialization necessary @@ -166,72 +159,62 @@ not_nmi: /* Retrieve boot configuration pointers */ jal mips_cps_get_bootcfg - nop /* Skip core-level init if we started up coherent */ bnez s7, 1f - nop /* Perform any further required core-level initialisation */ jal mips_cps_core_init - nop /* * Boot any other VPEs within this core that should be online, and * deactivate this VPE if it should be offline. */ + move a0, v0 move a1, t9 jal mips_cps_boot_vpes - move a0, v0 /* Off we go! */ -1: PTR_L t1, VPEBOOTCFG_PC(v1) - PTR_L gp, VPEBOOTCFG_GP(v1) - PTR_L sp, VPEBOOTCFG_SP(v1) +1: PTR_L t1, VPEBOOTCFG_PC(s1) + PTR_L gp, VPEBOOTCFG_GP(s1) + PTR_L sp, VPEBOOTCFG_SP(s1) jr t1 - nop END(mips_cps_core_entry) .org 0x200 LEAF(excep_tlbfill) DUMP_EXCEP("TLB Fill") b . - nop END(excep_tlbfill) .org 0x280 LEAF(excep_xtlbfill) DUMP_EXCEP("XTLB Fill") b . - nop END(excep_xtlbfill) .org 0x300 LEAF(excep_cache) DUMP_EXCEP("Cache") b . - nop END(excep_cache) .org 0x380 LEAF(excep_genex) DUMP_EXCEP("General") b . - nop END(excep_genex) .org 0x400 LEAF(excep_intex) DUMP_EXCEP("Interrupt") b . 
- nop END(excep_intex) .org 0x480 LEAF(excep_ejtag) PTR_LA k0, ejtag_debug_handler jr k0 - nop END(excep_ejtag) LEAF(mips_cps_core_init) @@ -250,7 +233,6 @@ LEAF(mips_cps_core_init) dvpe PTR_LA t1, 1f jr.hb t1 - nop /* Enter VPE configuration state */ 1: mfc0 t0, CP0_MVPCONTROL @@ -265,7 +247,6 @@ LEAF(mips_cps_core_init) /* If there's only 1, we're done */ beqz t0, 2f - nop /* Loop through each VPE within this core */ li ta1, 1 @@ -294,7 +275,6 @@ LEAF(mips_cps_core_init) addiu ta1, ta1, 1 slt t0, ta1, ta3 bnez t0, 1b - nop /* Leave VPE configuration state */ 2: mfc0 t0, CP0_MVPCONTROL @@ -304,14 +284,13 @@ LEAF(mips_cps_core_init) 3: .set pop #endif jr ra - nop END(mips_cps_core_init) /** * mips_cps_get_bootcfg() - retrieve boot configuration pointers * * Returns: pointer to struct core_boot_config in v0, pointer to - * struct vpe_boot_config in v1, VPE ID in t9 + * struct vpe_boot_config in s1, VPE ID in t9 */ LEAF(mips_cps_get_bootcfg) /* Calculate a pointer to this cores struct core_boot_config */ @@ -344,7 +323,9 @@ LEAF(mips_cps_get_bootcfg) */ mfc0 t9, CP0_GLOBALNUMBER andi t9, t9, MIPS_GLOBALNUMBER_VP -#elif defined(CONFIG_MIPS_MT_SMP) +#endif +1: +#if defined(CONFIG_MIPS_MT_SMP) has_mt ta2, 1f /* Find the number of VPEs present in the core */ @@ -368,12 +349,11 @@ LEAF(mips_cps_get_bootcfg) 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE - mul v1, t9, t1 + mul s1, t9, t1 PTR_L ta3, COREBOOTCFG_VPECONFIG(v0) - PTR_ADDU v1, v1, ta3 + PTR_ADDU s1, s1, ta3 jr ra - nop END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) @@ -400,10 +380,12 @@ LEAF(mips_cps_boot_vpes) PTR_S ta2, CPC_CL_VC_STOP_OFS(t1) ehb -#elif defined(CONFIG_MIPS_MT) +#endif + +#if defined(CONFIG_MIPS_MT) /* If the core doesn't support MT then return */ - has_mt t0, 5f +5: has_mt t0, 5f /* Enter VPE configuration state */ .set push @@ -414,7 +396,6 @@ LEAF(mips_cps_boot_vpes) PTR_LA t1, 1f jr.hb t1 - nop 1: mfc0 t1, CP0_MVPCONTROL ori t1, t1, 
MVPCONTROL_VPC mtc0 t1, CP0_MVPCONTROL @@ -427,7 +408,6 @@ LEAF(mips_cps_boot_vpes) /* Check whether the VPE should be running. If not, skip it */ 1: andi t0, ta2, 1 beqz t0, 2f - nop /* Operate on the appropriate TC */ mfc0 t0, CP0_VPECONTROL @@ -444,7 +424,6 @@ LEAF(mips_cps_boot_vpes) /* Skip the VPE if its TC is not halted */ mftc0 t0, CP0_TCHALT beqz t0, 2f - nop /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE @@ -458,10 +437,12 @@ LEAF(mips_cps_boot_vpes) /* Set the TC stack pointer */ lw t1, VPEBOOTCFG_SP(t0) mttgpr t1, sp + mttgpr sp, t1 /* Set the TC global pointer */ lw t1, VPEBOOTCFG_GP(t0) mttgpr t1, gp + mttgpr gp, t1 /* Copy config from this VPE */ mfc0 t0, CP0_CONFIG @@ -474,7 +455,6 @@ LEAF(mips_cps_boot_vpes) mfc0 t0, CP0_CONFIG, 3 and t0, t0, MIPS_CONF3_SC beqz t0, 3f - nop mfc0 t0, CP0_SEGCTL0 mttc0 t0, CP0_SEGCTL0 mfc0 t0, CP0_SEGCTL1 @@ -490,7 +470,7 @@ LEAF(mips_cps_boot_vpes) mftc0 t0, CP0_TCSTATUS li t1, ~TCSTATUS_IXMT and t0, t0, t1 - ori t0, t0, TCSTATUS_A + or t0, t0, TCSTATUS_A mttc0 t0, CP0_TCSTATUS /* Clear the TC halt bit */ @@ -505,7 +485,6 @@ LEAF(mips_cps_boot_vpes) 2: srl ta2, ta2, 1 addiu ta1, ta1, 1 bnez ta2, 1b - nop /* Leave VPE configuration state */ mfc0 t1, CP0_MVPCONTROL @@ -521,14 +500,12 @@ LEAF(mips_cps_boot_vpes) sll t0, t0, a1 and t0, t0, t8 bnez t0, 2f - nop /* This VPE should be offline, halt the TC */ li t0, TCHALT_H mtc0 t0, CP0_TCHALT PTR_LA t0, 1f 1: jr.hb t0 - nop 2: @@ -536,7 +513,6 @@ LEAF(mips_cps_boot_vpes) /* Return */ 5: jr ra - nop END(mips_cps_boot_vpes) LEAF(mips_cps_cache_init) @@ -553,23 +529,23 @@ LEAF(mips_cps_cache_init) ehb /* Primary cache configuration is indicated by Config1 */ - mfc0 v0, CP0_CONFIG, 1 + mfc0 ta0, CP0_CONFIG, 1 /* Detect I-cache line size */ - _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ + _EXT t0, ta0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ + li t1, 2 beqz t0, icache_done - li t1, 2 sllv t0, t1, t0 /* Detect I-cache size */ - _EXT t1, v0, 
MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ + _EXT t1, ta0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ xori t2, t1, 0x7 + li t3, 32 beqz t2, 1f - li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ - _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ + _EXT t2, ta0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -579,38 +555,35 @@ LEAF(mips_cps_cache_init) 1: cache Index_Store_Tag_I, 0(a0) PTR_ADD a0, a0, t0 bne a0, a1, 1b - nop icache_done: /* Detect D-cache line size */ - _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ + _EXT t0, ta0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ + li t1, 2 beqz t0, dcache_done - li t1, 2 sllv t0, t1, t0 /* Detect D-cache size */ - _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ + _EXT t1, ta0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ xori t2, t1, 0x7 + li t3, 32 beqz t2, 1f - li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ - _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ + _EXT t2, ta0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 li a0, CKSEG0 PTR_ADDU a1, a0, t1 - PTR_SUBU a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) + PTR_ADD a0, a0, t0 bne a0, a1, 1b - PTR_ADD a0, a0, t0 dcache_done: jr ra - nop END(mips_cps_cache_init) #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) @@ -635,7 +608,6 @@ LEAF(mips_cps_pm_save) psstate t1 SUSPEND_SAVE_STATIC jr v0 - nop END(mips_cps_pm_save) LEAF(mips_cps_pm_restore) @@ -646,3 +618,134 @@ LEAF(mips_cps_pm_restore) END(mips_cps_pm_restore) #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ + +#ifdef CONFIG_CPU_NANOMIPS +/* + * Hardcode PM entry / exit code for Shaolin + * until uasm grows support for nanomips. 
+ */ +LEAF(nanomips_cps_nc_entry_fn_2) + /* Enter state 2 (power gated) */ + cmgcrb t8 + + /* if (coupled_coherence) { == 1 */ + sync +lbl_incready: + ll t1, 0(a1) + addiu t2, t1, 1 + sc t2, 0(a1) + beqz t2, lbl_incready + addiu t1, t1, 1 + sync + + /* If last VPE, go to disable coherence */ + beq t1, a0, lbl_disable_coherence + nop + + /* Halt this TC to stop VPE */ + addiu t0, zero, TCHALT_H + mtc0 t0, CP0_TCHALT + +lbl_secondary_hang: + b lbl_secondary_hang + nop + + +lbl_disable_coherence: + /* Invalidate L1 icache */ + mfc0 ta0, CP0_CONFIG, 1 + + /* Detect I-cache line size */ + _EXT t0, ta0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ + li t1, 2 + beqz t0, icache_invalidated + sllv t0, t1, t0 + + /* Detect I-cache size */ + _EXT t1, ta0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ + xori t2, t1, 0x7 + li t3, 32 + beqz t2, 1f + addiu t1, t1, 1 + sllv t1, t3, t1 +1: /* At this point t1 == I-cache sets per way */ + _EXT t2, ta0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ + addiu t2, t2, 1 + mul t1, t1, t0 + mul t1, t1, t2 + + li a0, CKSEG0 + PTR_ADD a1, a0, t1 +1: cache Index_Invalidate_I, 0(a0) + PTR_ADD a0, a0, t0 + bne a0, a1, 1b + nop + +icache_invalidated: + /* Invalidate L1 dcache */ + + /* Detect D-cache line size */ + _EXT t0, ta0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ + li t1, 2 + beqz t0, dcache_writtenback + sllv t0, t1, t0 + + /* Detect D-cache size */ + _EXT t1, ta0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ + xori t2, t1, 0x7 + li t3, 32 + beqz t2, 1f + addiu t1, t1, 1 + sllv t1, t3, t1 +1: /* At this point t1 == D-cache sets per way */ + _EXT t2, ta0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ + addiu t2, t2, 1 + mul t1, t1, t0 + mul t1, t1, t2 + + li a0, CKSEG0 + PTR_ADDU a1, a0, t1 +1: cache Index_Writeback_Inv_D, 0(a0) + PTR_ADD a0, a0, t0 + bne a0, a1, 1b + nop + +dcache_writtenback: + /* Barrier ensuring previous cache invalidates are complete */ + sync + ehb + + /* Disable all but self interventions */ + PTR_L t1, GCR_CL_ID_OFS(t8) + _EXT t1, t1, CM3_GCR_Cx_ID_CORENUM_SHF, 8 + 
LI t2, 1 + sllv t1, t2, t1 + PTR_S t1, GCR_CL_COHERENCE_OFS(t8) + PTR_L t1, GCR_CL_COHERENCE_OFS(t8) + + /* Barrier to ensure write to coherence control is complete */ + sync + ehb + + /* Disable coherence */ + PTR_S zero, GCR_CL_COHERENCE_OFS(t8) + PTR_L t1, GCR_CL_COHERENCE_OFS(t8) + + /* Find base address of CPC */ + PTR_L t1, GCR_CPC_BASE_OFS(t8) + PTR_LI t2, ~0x7fff + and t1, t1, t2 + PTR_LI t2, UNCAC_BASE + PTR_ADD t1, t1, t2 + + /* Powerdown this core via CPC */ + addiu t2, zero, CPC_CL_CMD_PWRDOWN + PTR_S t2, CPC_CL_CMD_OFS(t1) + +lbl_hang: + b lbl_hang + nop + + END(nanomips_cps_nc_entry_fn_2) + +#endif /* CONFIG_CPU_NANOMIPS */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 3603be12a6ef..293fb9b7b288 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -44,8 +44,12 @@ static inline unsigned long cpu_get_fpu_id(void) unsigned long tmp, fpu_id; tmp = read_c0_status(); - __enable_fpu(FPU_AS_IS); - fpu_id = read_32bit_cp1_register(CP1_REVISION); + if (!__enable_fpu(FPU_AS_IS)) { + fpu_id = read_32bit_cp1_register(CP1_REVISION); + } else { + /* We were unable to enable the FPU */ + fpu_id = 0; + } write_c0_status(tmp); return fpu_id; } @@ -171,6 +175,9 @@ static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) switch (ieee754) { case STRICT: +#ifdef CONFIG_CPU_NANOMIPS + c->options |= MIPS_CPU_NAN_2008; +#else if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { @@ -179,6 +186,7 @@ static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) c->options |= MIPS_CPU_NAN_LEGACY; c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; } +#endif break; case LEGACY: c->options |= MIPS_CPU_NAN_LEGACY; @@ -504,6 +512,11 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) case MIPS_CPU_ISA_II: c->isa_level |= MIPS_CPU_ISA_II; break; + + /* nanoMIPS is backwards incompatible */ + case MIPS_CPU_ISA_NANO32R6: + 
c->isa_level |= MIPS_CPU_ISA_NANO32R6; + break; } } @@ -617,6 +630,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 2: set_isa(c, MIPS_CPU_ISA_M32R6); break; + case 3: + set_isa(c, MIPS_CPU_ISA_NANO32R6); + break; default: goto unknown; } @@ -662,7 +678,7 @@ static inline unsigned int decode_config1(struct cpuinfo_mips *c) c->ases |= MIPS_ASE_MIPS16; if (config1 & MIPS_CONF1_EP) c->options |= MIPS_CPU_EJTAG; - if (config1 & MIPS_CONF1_FP) { + if (IS_ENABLED(CONFIG_FP_SUPPORT) && (config1 & MIPS_CONF1_FP)) { c->options |= MIPS_CPU_FPU; c->options |= MIPS_CPU_32FPR; } @@ -1666,11 +1682,22 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_M6250; __cpu_name[cpu] = "MIPS M6250"; break; + case PRID_IMP_I7200: + c->cputype = CPU_I7200; + __cpu_name[cpu] = "MIPS I7200"; + break; } decode_configs(c); - spram_config(); + if (!IS_ENABLED(CONFIG_CPU_NANOMIPS)) { + /* + * nanoMIPS toolchain seems to miscompile spram_config()... + * + * TODO: Remove this once fixed. + */ + spram_config(); + } switch (__get_cpu_type(c->cputype)) { case CPU_I6500: diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index eed099f35bf1..8e6f4c02f1b0 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -34,7 +34,7 @@ static inline unsigned int rdhwr_count(void) __asm__ __volatile__( " .set push\n" - " .set mips32r2\n" + " .set " MIPS_ISA_LEVEL "\n" " rdhwr %0, $2\n" " .set pop\n" : "=r" (count)); diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 731325a61a78..7351133868e0 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -156,7 +156,9 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, * Determine the NaN personality, reject the binary if not allowed. * Also ensure that any interpreter matches the executable. 
*/ - if (flags & EF_MIPS_NAN2008) { + if (IS_ENABLED(CONFIG_CPU_NANOMIPS)) { + state->nan_2008 = 1; + } else if (flags & EF_MIPS_NAN2008) { if (mips_use_nan_2008) state->nan_2008 = 1; else @@ -192,7 +194,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, abi0 = abi1 = fp_abi; } - if (elf32 && !(flags & EF_MIPS_ABI2)) { + if (elf32 && !(flags & EF_MIPS_ABI2) && !IS_ENABLED(CONFIG_CPU_NANOMIPS)) { /* Default to a mode capable of running code expecting FR=0 */ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0; diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 9f7347211ab4..35be7e791a63 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -40,11 +41,20 @@ NESTED(except_vec3_generic, 0, sp) #ifdef CONFIG_64BIT dsll k1, k1, 1 #endif +#ifdef CONFIG_CPU_NANOMIPS + PTR_LA k0, exception_handlers + PTR_ADDU \ + k0, k0, k1 + PTR_L k0, 0(k0) +#else PTR_L k0, exception_handlers(k1) +#endif jr k0 .set pop END(except_vec3_generic) +#if __mips_isa_rev < 6 + /* * General exception handler for CPUs with virtual coherency exception. 
* @@ -105,33 +115,24 @@ handle_vcei: .set pop END(except_vec3_r4000) +#endif /* __mips_isa_rev < 6 */ + __FINIT .align 5 /* 32 byte rollback region */ LEAF(__r4k_wait) - .set push - .set noreorder /* start of rollback region */ LONG_L t0, TI_FLAGS($28) nop andi t0, _TIF_NEED_RESCHED bnez t0, 1f - nop - nop - nop -#ifdef CONFIG_CPU_MICROMIPS - nop - nop - nop - nop -#endif + .set push .set MIPS_ISA_ARCH_LEVEL_RAW wait + .set pop /* end of rollback region (the region size must be power of two) */ 1: jr ra - nop - .set pop END(__r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler @@ -272,15 +273,20 @@ BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 + PTR_LA v1, except_vec_vi_handler .set push .set noreorder - PTR_LA v1, except_vec_vi_handler +#ifdef CONFIG_CPU_NANOMIPS +FEXPORT(except_vec_vi_li48) + li48 v0, 0 /* Patched */ +#else FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ - jr v1 FEXPORT(except_vec_vi_ori) - ori v0, 0 /* Patched */ + ori v0, 0 /* Patched */ +#endif .set pop + jr v1 END(except_vec_vi) EXPORT(except_vec_vi_end) @@ -365,7 +371,7 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp) ejtag_return: MFC0 k0, CP0_DESAVE - .set mips32 + .set MIPS_ISA_LEVEL_RAW deret .set pop END(ejtag_debug_handler) @@ -433,7 +439,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .macro __build_clear_fpe .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ - .set mips1 + .set MIPS_ISA_LEVEL_RAW SET_HARDFLOAT cfc1 a1, fcr31 .set pop @@ -453,6 +459,18 @@ NESTED(nmi_handler, PT_SIZE, sp) KMODE .endm + .macro __build_clear_bp +#ifdef __nanomips__ + mfc0 t0, CP0_BADINSTR + li t1, 0x001001ff /* break 511 */ + bnec t0, t1, 1f + li t2, CKSEG1ADDR(0x1fc00000) + sw zero, 0(t2) +1: +#endif + __build_clear_sti + .endm + .macro __BUILD_silent exception .endm @@ -501,7 +519,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER ades ade ade silent /* #5 */ BUILD_HANDLER ibe be cli silent /* #6 */ BUILD_HANDLER dbe be cli silent /* #7 */ - 
BUILD_HANDLER bp bp sti silent /* #9 */ + BUILD_HANDLER bp bp bp silent /* #9 */ BUILD_HANDLER ri ri sti silent /* #10 */ BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER ov ov sti silent /* #12 */ @@ -525,6 +543,8 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER dsp dsp sti silent /* #26 */ BUILD_HANDLER reserved reserved sti verbose /* others */ +#ifndef CONFIG_CPU_NANOMIPS + .align 5 LEAF(handle_ri_rdhwr_tlbp) .set push @@ -550,20 +570,19 @@ NESTED(nmi_handler, PT_SIZE, sp) LEAF(handle_ri_rdhwr) .set push .set noat - .set noreorder /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ MFC0 k1, CP0_EPC #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) and k0, k1, 1 + xor k1, k0 beqz k0, 1f - xor k1, k0 lhu k0, (k1) lhu k1, 2(k1) ins k1, k0, 16, 16 lui k0, 0x007d + ori k0, 0x6b3c b docheck - ori k0, 0x6b3c 1: lui k0, 0x7c03 lw k1, (k1) @@ -575,7 +594,6 @@ NESTED(nmi_handler, PT_SIZE, sp) lw k1, (k1) ori k0, 0xe83b #endif - .set reorder docheck: bne k0, k1, handle_ri /* if not ours */ @@ -609,6 +627,8 @@ isrdhwr: .set pop END(handle_ri_rdhwr) +#endif /* !CONFIG_CPU_NANOMIPS */ + #ifdef CONFIG_64BIT /* A temporary overflow handler used by check_daddi(). 
*/ diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 7c246b69c545..aeb1c080e4dd 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -64,10 +64,10 @@ void r4k_wait_irqoff(void) { if (!need_resched()) __asm__( - " .set push \n" - " .set arch=r4000 \n" - " wait \n" - " .set pop \n"); + " .set push \n" + " .set " MIPS_ISA_LEVEL " \n" + " wait \n" + " .set pop \n"); local_irq_enable(); } @@ -80,13 +80,13 @@ static void rm7k_wait_irqoff(void) if (!need_resched()) __asm__( " .set push \n" - " .set arch=r4000 \n" + " .set " MIPS_ISA_LEVEL " \n" " .set noat \n" - " mfc0 $1, $12 \n" + " mfc0 $at, $12 \n" " sync \n" - " mtc0 $1, $12 # stalls until W stage \n" + " mtc0 $at, $12 # stalls until W stage \n" " wait \n" - " mtc0 $1, $12 # stalls until W stage \n" + " mtc0 $at, $12 # stalls until W stage \n" " .set pop \n"); local_irq_enable(); } @@ -101,7 +101,8 @@ static void au1k_wait(void) unsigned long c0status = read_c0_status() | 1; /* irqs on */ __asm__( - " .set arch=r4000 \n" + " .set push \n" + " .set " MIPS_ISA_LEVEL " \n" " cache 0x14, 0(%0) \n" " cache 0x14, 32(%0) \n" " sync \n" @@ -111,7 +112,7 @@ static void au1k_wait(void) " nop \n" " nop \n" " nop \n" - " .set mips0 \n" + " .set pop \n" : : "r" (au1k_wait), "r" (c0status)); } diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index d5f7362e8c24..975b43ad0a4e 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include @@ -228,6 +230,51 @@ void mips_mt_set_cpuoptions(void) } } +#ifdef CONFIG_MIPS_MT_RAND_SCHED_POLICY + +static bool __mips_mt_randomize_sched_policy; + +void mips_mt_randomize_sched_policy(void) +{ + unsigned int ctl; + + /* Optimize code out for kernels that will never run on I7200 */ + if (__builtin_constant_p(boot_cpu_type() != CPU_I7200) && + (boot_cpu_type() != CPU_I7200)) + return; + + /* Only randomize policy if the user asks for it */ + if 
(!__mips_mt_randomize_sched_policy) + return; + + /* Enable PM_Prio pins */ + write_c0_tcschedule(I7200_TCSCHEDULE_PRIO_EN); + + /* Toggle greedy/QoS mode */ + ctl = read_c0_mvpcontrol(); + ctl ^= BIT(16); + write_c0_mvpcontrol(ctl); +} + +static int __init parse_mt_random_policy(char *arg) +{ + switch (boot_cpu_type()) { + case CPU_I7200: + pr_info("MIPS: Enabling randomized MT scheduling policy\n"); + __mips_mt_randomize_sched_policy = true; + break; + + default: + pr_warn("MIPS: Randomized MT scheduling policy unsupported\n"); + break; + } + + return 0; +} +early_param("mt_random_policy", parse_mt_random_policy); + +#endif /* CONFIG_MIPS_MT_RAND_SCHED_POLICY */ + struct class *mt_class; static int __init mt_init(void) diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index f298eb2ff6c2..c4fac25fa654 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -47,6 +47,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS + unsigned long fp = regs->regs[30]; unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; @@ -62,7 +63,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, perf_callchain_store(entry, pc); if (entry->nr >= entry->max_stack) break; - pc = unwind_stack(current, &sp, pc, &ra); + pc = unwind_stack(current, &sp, &fp, pc, &ra); } while (pc); #else save_raw_perf_callchain(entry, sp); diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 6668f67a61c3..ba326a93f9b2 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1630,6 +1630,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) #endif break; case CPU_INTERAPTIV: + case CPU_I7200: if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; else @@ -1778,6 +1779,11 @@ init_hw_perf_events(void) 
mipspmu.general_event_map = &i6x00_event_map; mipspmu.cache_event_map = &i6x00_cache_map; break; + case CPU_I7200: + mipspmu.name = "mips/I7200"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; case CPU_1004K: mipspmu.name = "mips/1004K"; mipspmu.general_event_map = &mipsxxcore_event_map; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index e8bfe6bb94f9..b9b4c309f102 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -346,6 +346,19 @@ static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, uasm_i_nop(pp); } +#ifdef CONFIG_CPU_NANOMIPS + +extern unsigned nanomips_cps_nc_entry_fn_2(unsigned online, u32 *nc_ready_count); + +cps_nc_entry_fn nanomips_cps_nc_entry_fns[CPS_PM_STATE_COUNT] = +{ + NULL, + NULL, + nanomips_cps_nc_entry_fn_2 +}; + +#endif + static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) { struct uasm_label *l = labels; @@ -371,6 +384,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) lbl_decready, }; +#ifdef CONFIG_CPU_NANOMIPS + /* Use hardcoded entry function for now */ + return nanomips_cps_nc_entry_fns[state]; +#endif + /* Allocate a buffer to hold the generated code */ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); if (!buf) diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index f509f764e976..0dbabfcc15bf 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -140,6 +140,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "%s", " mips64r2"); if (cpu_has_mips64r6) seq_printf(m, "%s", " mips64r6"); + if (cpu_has_nanomips32r6) + seq_printf(m, "%s", " nanomips32r6"); seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index db6bf483e671..7a29f007e062 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -151,8 +151,12 @@ int 
copy_thread_tls(unsigned long clone_flags, unsigned long usp, /* user thread */ *childregs = *regs; +#ifdef CONFIG_CPU_NANOMIPS + childregs->regs[4] = 0; /* Child gets zero as return value */ +#else childregs->regs[7] = 0; /* Clear error flag */ childregs->regs[2] = 0; /* Child gets zero as return value */ +#endif if (usp) childregs->regs[29] = usp; ti->addr_limit = USER_DS; @@ -506,6 +510,7 @@ static unsigned long thread_saved_pc(struct task_struct *tsk) /* generic stack unwinding function */ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long *sp, + unsigned long *fp, unsigned long pc, unsigned long *ra) { @@ -514,6 +519,14 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long size, ofs; struct pt_regs *regs; int leaf; +#if defined(CONFIG_FRAME_POINTER) + /* + * NanoMIPS gcc uses a 4096 byte bias between the fp register value + * and the logical fp. This allows greater stack coverage using 12 bit + * offsets from the fp register + */ + unsigned long lfp = *fp + 4096; +#endif if (!stack_page) return 0; @@ -553,6 +566,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, pc = regs->cp0_epc; if (!user_mode(regs) && __kernel_text_address(pc)) { *sp = regs->regs[29]; + *fp = regs->regs[30]; *ra = regs->regs[31]; return pc; } @@ -569,6 +583,27 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, return pc; } +#ifdef CONFIG_FRAME_POINTER + /* Is the frame pointer in the right ball park? */ + if ((lfp >= low) && (lfp <= high)) { + if (unlikely(lfp == irq_stack_high)) { + /* + * This frame is at the top of the IRQ stack. Set SP + * so that the next iteration will detect the top of + * IRQ stack and jump to the interrupted task stack. + */ + *sp = irq_stack_high; + } + + /* Retrieve pc / fp from save location */ + pc = ((unsigned long *)(lfp))[-2]; + *fp = ((unsigned long *)(lfp))[-1]; + + return __kernel_text_address(pc) ? 
pc : 0; + } else { + return 0; + } +#endif /* CONFIG_ARCH_WANT_FRAME_POINTERS */ info.func = (void *)(pc - ofs); info.func_size = ofs; /* analyze from start to ofs */ leaf = get_frame_info(&info); @@ -597,7 +632,8 @@ EXPORT_SYMBOL(unwind_stack_by_address); /* used by show_backtrace() */ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, - unsigned long pc, unsigned long *ra) + unsigned long *fp, unsigned long pc, + unsigned long *ra) { unsigned long stack_page = 0; int cpu; @@ -612,7 +648,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, if (!stack_page) stack_page = (unsigned long)task_stack_page(task); - return unwind_stack_by_address(stack_page, sp, pc, ra); + return unwind_stack_by_address(stack_page, sp, fp, pc, ra); } #endif @@ -623,7 +659,7 @@ unsigned long get_wchan(struct task_struct *task) { unsigned long pc = 0; #ifdef CONFIG_KALLSYMS - unsigned long sp; + unsigned long sp, fp; unsigned long ra = 0; #endif @@ -636,9 +672,10 @@ unsigned long get_wchan(struct task_struct *task) #ifdef CONFIG_KALLSYMS sp = task->thread.reg29 + schedule_mfi.frame_size; + fp = task->thread.reg30; while (in_sched_functions(pc)) - pc = unwind_stack(task, &sp, pc, &ra); + pc = unwind_stack(task, &sp, &fp, pc, &ra); #endif put_task_stack(task); @@ -679,6 +716,10 @@ int mips_get_process_fp_mode(struct task_struct *task) { int value = 0; + /* We can do nothing sensible if we have no FP support */ + if (!IS_ENABLED(CONFIG_FP_SUPPORT)) + return -EOPNOTSUPP; + if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) value |= PR_FP_MODE_FR; if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) @@ -707,6 +748,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) struct cpumask process_cpus; int cpu; + /* We can do nothing sensible if we have no FP support */ + if (!IS_ENABLED(CONFIG_FP_SUPPORT)) + return -EOPNOTSUPP; + /* If nothing to change, return right away, successfully. 
*/ if (value == mips_get_process_fp_mode(task)) return 0; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0b23b1ad99e6..95d258a19b31 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -202,6 +202,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) int ptrace_get_watch_regs(struct task_struct *child, struct pt_watch_regs __user *addr) { + unsigned long watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW; enum pt_watch_style style; int i; @@ -211,7 +212,12 @@ int ptrace_get_watch_regs(struct task_struct *child, return -EIO; #ifdef CONFIG_32BIT - style = pt_watch_style_mips32; + if (boot_cpu_type() == CPU_I7200) { + style = pt_watch_style_i7200; + watchhi_mask = MIPS_WATCHHI_IRW_RSLT | MIPS_WATCHHI_IRW; + } else { + style = pt_watch_style_mips32; + } #define WATCH_STYLE mips32 #else style = pt_watch_style_mips64; @@ -225,7 +231,7 @@ int ptrace_get_watch_regs(struct task_struct *child, __put_user(child->thread.watch.mips3264.watchlo[i], &addr->WATCH_STYLE.watchlo[i]); __put_user(child->thread.watch.mips3264.watchhi[i] & - (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW), + watchhi_mask, &addr->WATCH_STYLE.watchhi[i]); __put_user(boot_cpu_data.watch_reg_masks[i], &addr->WATCH_STYLE.watch_masks[i]); @@ -242,6 +248,7 @@ int ptrace_get_watch_regs(struct task_struct *child, int ptrace_set_watch_regs(struct task_struct *child, struct pt_watch_regs __user *addr) { + enum pt_watch_style style; int i; int watch_active = 0; unsigned long lt[NUM_WATCH_REGS]; @@ -251,6 +258,16 @@ int ptrace_set_watch_regs(struct task_struct *child, return -EIO; if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) return -EIO; + +#ifdef CONFIG_32BIT + if (boot_cpu_type() == CPU_I7200) + style = pt_watch_style_i7200; + else + style = pt_watch_style_mips32; +#else + style = pt_watch_style_mips64; +#endif + /* Check the values. 
*/ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); @@ -267,13 +284,24 @@ int ptrace_set_watch_regs(struct task_struct *child, } #endif __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]); - if (ht[i] & ~MIPS_WATCHHI_MASK) - return -EINVAL; + if (style == pt_watch_style_i7200) { + if (ht[i] & MIPS_WATCHHI_MASK) + return -EINVAL; + } else { + if (ht[i] & ~MIPS_WATCHHI_MASK) + return -EINVAL; + } } /* Install them. */ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { - if (lt[i] & MIPS_WATCHLO_IRW) - watch_active = 1; + if (style == pt_watch_style_i7200) { + /* i7200 watch enabled via watchhi IRW bits */ + if (ht[i] & MIPS_WATCHHI_IRW) + watch_active = 1; + } else { + if (lt[i] & MIPS_WATCHLO_IRW) + watch_active = 1; + } child->thread.watch.mips3264.watchlo[i] = lt[i]; /* Set the G bit. */ child->thread.watch.mips3264.watchhi[i] = ht[i]; diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 8e3a6020c613..63c0129154ce 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -27,6 +27,7 @@ .macro EX insn, reg, src .set push SET_HARDFLOAT + .set noreorder .set nomacro .ex\@: \insn \reg, \src .set pop @@ -165,7 +166,8 @@ LEAF(_init_fpu) mtc1 t1, $f30 mtc1 t1, $f31 -#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || \ + defined(CONFIG_CPU_NANOMIPS32_R6) .set push .set MIPS_ISA_LEVEL_RAW .set fp=64 @@ -205,7 +207,8 @@ LEAF(_init_fpu) mthc1 t1, $f30 mthc1 t1, $f31 1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 || + CONFIG_CPU_NANOMIPS32_R6 */ #else .set MIPS_ISA_ARCH_LEVEL_RAW dmtc1 t1, $f0 @@ -230,8 +233,6 @@ LEAF(_init_fpu) .set pop /* SET_HARDFLOAT */ - .set noreorder - /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext @@ -256,7 +257,6 @@ 
LEAF(_save_fp_context) mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 - nop #endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, 8(a0) @@ -298,8 +298,8 @@ LEAF(_save_fp_context) EX sdc1 $f28, 224(a0) EX sdc1 $f30, 240(a0) EX sw t1, 0(a1) + li v0, 0 # success jr ra - li v0, 0 # success .set pop END(_save_fp_context) @@ -324,7 +324,6 @@ LEAF(_restore_fp_context) mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 - nop #endif EX ldc1 $f1, 8(a0) EX ldc1 $f3, 24(a0) @@ -364,8 +363,8 @@ LEAF(_restore_fp_context) EX ldc1 $f30, 240(a0) ctc1 t1, fcr31 .set pop + li v0, 0 # success jr ra - li v0, 0 # success END(_restore_fp_context) #ifdef CONFIG_CPU_HAS_MSA @@ -385,7 +384,7 @@ LEAF(\name) PTR_LA t1, 0f PTR_ADDU t0, t0, t1 jr t0 - nop + nop op_one_wr \op, 0, a1 op_one_wr \op, 1, a1 op_one_wr \op, 2, a1 @@ -489,8 +488,8 @@ LEAF(_save_msa_all_upper) save_msa_upper 29, 0xe8, a0 save_msa_upper 30, 0xf0, a0 save_msa_upper 31, 0xf8, a0 + li v0, 0 jr ra - li v0, 0 END(_save_msa_all_upper) .macro restore_msa_upper wr, off, base @@ -546,14 +545,12 @@ LEAF(_restore_msa_all_upper) restore_msa_upper 29, 0xe8, a0 restore_msa_upper 30, 0xf0, a0 restore_msa_upper 31, 0xf8, a0 + li v0, 0 jr ra - li v0, 0 END(_restore_msa_all_upper) #endif /* CONFIG_CPU_HAS_MSA */ - .set reorder - .type fault, @function .ent fault fault: li v0, -EFAULT # failure diff --git a/arch/mips/kernel/scall32-p32.S b/arch/mips/kernel/scall32-p32.S new file mode 100644 index 000000000000..f1ed2de3e672 --- /dev/null +++ b/arch/mips/kernel/scall32-p32.S @@ -0,0 +1,88 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. 
+ * Copyright (C) 2017 Imagination Technologies Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + .align 5 +NESTED(handle_sys, PT_SIZE, sp) + .set noat + SAVE_SOME + TRACE_IRQS_ON_RELOAD + STI + .set at + + # There is a SYSCALL[16] encoding too, but we only support SYSCALL[32] + PTR_L t1, PT_EPC(sp) # skip syscall on return + PTR_ADDIU t1, 4 # skip to next instruction + PTR_S t1, PT_EPC(sp) + + LONG_S v0, PT_R26(sp) # save v0/a0 for syscall restarting + + li t1, _TIF_WORK_SYSCALL_ENTRY + LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? + and t0, t1, t0 + bnez t0, syscall_trace_entry + +syscall_common: + # Syscall number is in t4 ($2) + sltiu t0, t4, __NR_syscalls + beqz t0, illegal_syscall + + PTR_LA t0, sys_call_table + PTR_LXS t0, t4(t0) # load entry from table + + jalr t0 # Do The Real Thing (TM) + + li t0, 1 # trigger syscall restart check + LONG_S t0, PT_R0(sp) + LONG_S v0, PT_R4(sp) # result + +syscall_exit: + j syscall_exit_partial + +/* ------------------------------------------------------------------------ */ + +syscall_trace_entry: + SAVE_STATIC + move a0, sp + move a1, t4 + jal syscall_trace_enter + + bltz v0, 1f # seccomp failed? Skip syscall + + RESTORE_STATIC + LONG_L t4, PT_R2(sp) # Restore syscall (maybe modified) + LONG_L a0, PT_R4(sp) # Restore argument registers + LONG_L a1, PT_R5(sp) + LONG_L a2, PT_R6(sp) + LONG_L a3, PT_R7(sp) + LONG_L a4, PT_R8(sp) + LONG_L a5, PT_R9(sp) + j syscall_common + +1: j syscall_exit + +illegal_syscall: + /* This also isn't a valid syscall, throw an error. 
*/ + li v0, -ENOSYS # error + LONG_S v0, PT_R4(sp) + j syscall_exit + END(handle_sys) diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 9e224469c788..f2c691c2de28 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -42,8 +42,10 @@ #include "signal-common.h" +#ifndef __MIPS_REDUCED_SIGCONTEXT static int (*save_fp_context)(void __user *sc); static int (*restore_fp_context)(void __user *sc); +#endif /* __MIPS_REDUCED_SIGCONTEXT */ struct sigframe { u32 sf_ass[4]; /* argument save space for o32 */ @@ -62,6 +64,7 @@ struct rt_sigframe { struct ucontext rs_uc; }; +#ifndef __MIPS_REDUCED_SIGCONTEXT /* * Thread saved context copy to/from a signal context presumed to be on the * user stack, and therefore accessed with appropriate macros from uaccess.h. @@ -124,6 +127,7 @@ static int restore_hw_fp_context(void __user *sc) return _restore_fp_context(fpregs, csr); } +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ /* * Extended context handling. @@ -142,6 +146,7 @@ static inline void __user *sc_to_extcontext(void __user *sc) return &uc->uc_extcontext; } +#ifndef __MIPS_REDUCED_SIGCONTEXT static int save_msa_extcontext(void __user *buf) { struct msa_extcontext __user *msa = buf; @@ -233,15 +238,37 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) return err; } +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ static int save_extcontext(void __user *buf) { - int sz; + int sz = 0, __maybe_unused res; + +#if 0 + /* TODO implement DSP support on nanoMIPS */ + res = save_dsp_extcontext(buf); + if (res < 0) + return res; + buf += res; + sz += res; +#endif - sz = save_msa_extcontext(buf); - if (sz < 0) - return sz; - buf += sz; +#if defined(CONFIG_FP_SUPPORT) && defined(__MIPS_REDUCED_SIGCONTEXT) + /* TODO implement FP support on nanoMIPS */ + res = save_fpu_extcontext(buf); + if (res < 0) + return res; + buf += res; + sz += res; +#endif /* CONFIG_FP_SUPPORT && __MIPS_REDUCED_SIGCONTEXT */ + +#ifndef __MIPS_REDUCED_SIGCONTEXT + res = 
save_msa_extcontext(buf); + if (res < 0) + return res; + buf += res; + sz += res; +#endif /* __MIPS_REDUCED_SIGCONTEXT */ /* If no context was saved then trivially return */ if (!sz) @@ -274,9 +301,11 @@ static int restore_extcontext(void __user *buf) return err; switch (ext.magic) { +#ifndef __MIPS_REDUCED_SIGCONTEXT case MSA_EXTCONTEXT_MAGIC: err = restore_msa_extcontext(buf, ext.size); break; +#endif /* __MIPS_REDUCED_SIGCONTEXT */ default: err = -EINVAL; @@ -296,8 +325,10 @@ static int restore_extcontext(void __user *buf) int protected_save_fp_context(void __user *sc) { struct mips_abi *abi = current->thread.abi; +#ifndef __MIPS_REDUCED_SIGCONTEXT uint64_t __user *fpregs = sc + abi->off_sc_fpregs; uint32_t __user *csr = sc + abi->off_sc_fpc_csr; +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ uint32_t __user *used_math = sc + abi->off_sc_used_math; unsigned int used, ext_sz; int err; @@ -318,6 +349,7 @@ int protected_save_fp_context(void __user *sc) if (IS_ENABLED(CONFIG_EVA)) lose_fpu(1); +#ifndef __MIPS_REDUCED_SIGCONTEXT while (1) { lock_fpu_owner(); if (is_fpu_owner()) { @@ -336,6 +368,7 @@ int protected_save_fp_context(void __user *sc) if (err) return err; /* really bad sigcontext */ } +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ fp_done: ext_sz = err = save_extcontext(sc_to_extcontext(sc)); @@ -349,8 +382,10 @@ fp_done: int protected_restore_fp_context(void __user *sc) { struct mips_abi *abi = current->thread.abi; +#ifndef __MIPS_REDUCED_SIGCONTEXT uint64_t __user *fpregs = sc + abi->off_sc_fpregs; uint32_t __user *csr = sc + abi->off_sc_fpc_csr; +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ uint32_t __user *used_math = sc + abi->off_sc_used_math; unsigned int used; int err, sig = 0, tmp __maybe_unused; @@ -369,6 +404,7 @@ int protected_restore_fp_context(void __user *sc) if (!(used & USED_FP)) goto fp_done; +#ifndef __MIPS_REDUCED_SIGCONTEXT err = sig = fpcsr_pending(csr); if (err < 0) return err; @@ -399,6 +435,7 @@ int protected_restore_fp_context(void __user *sc) if 
(err) break; /* really bad sigcontext */ } +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ fp_done: if (!err && (used & USED_EXTCONTEXT)) @@ -421,6 +458,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) #ifdef CONFIG_CPU_HAS_SMARTMIPS err |= __put_user(regs->acx, &sc->sc_acx); #endif +#ifndef __MIPS_REDUCED_SIGCONTEXT err |= __put_user(regs->hi, &sc->sc_mdhi); err |= __put_user(regs->lo, &sc->sc_mdlo); if (cpu_has_dsp) { @@ -432,6 +470,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __put_user(mflo3(), &sc->sc_lo3); err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); } +#endif /* __MIPS_REDUCED_SIGCONTEXT */ /* @@ -455,6 +494,7 @@ static size_t extcontext_max_size(void) * the extended context for the current task at the current time. */ + /* FIXME handle nanoMIPS FPU / DSP extended context */ if (thread_msa_context_live()) sz += sizeof(struct msa_extcontext); @@ -486,7 +526,7 @@ int fpcsr_pending(unsigned int __user *fpcsr) int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { - unsigned long treg; + unsigned long __maybe_unused treg; int err = 0; int i; @@ -498,6 +538,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) #ifdef CONFIG_CPU_HAS_SMARTMIPS err |= __get_user(regs->acx, &sc->sc_acx); #endif + +#ifndef __MIPS_REDUCED_SIGCONTEXT err |= __get_user(regs->hi, &sc->sc_mdhi); err |= __get_user(regs->lo, &sc->sc_mdlo); if (cpu_has_dsp) { @@ -509,6 +551,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); } +#endif /* __MIPS_REDUCED_SIGCONTEXT */ for (i = 1; i < 32; i++) err |= __get_user(regs->regs[i], &sc->sc_regs[i]); @@ -592,13 +635,15 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act, #endif #ifdef CONFIG_TRAD_SIGNALS -asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) 
+asmlinkage void sys_sigreturn(void) { struct sigframe __user *frame; + struct pt_regs *regs; sigset_t blocked; int sig; - frame = (struct sigframe __user *) regs.regs[29]; + regs = current_pt_regs(); + frame = (struct sigframe __user *) regs->regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) @@ -606,7 +651,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) set_current_blocked(&blocked); - sig = restore_sigcontext(®s, &frame->sf_sc); + sig = restore_sigcontext(regs, &frame->sf_sc); if (sig < 0) goto badframe; else if (sig) @@ -619,7 +664,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) "move\t$29, %0\n\t" "j\tsyscall_exit" :/* no outputs */ - :"r" (®s)); + :"r" (regs)); /* Unreached */ badframe: @@ -627,13 +672,15 @@ badframe: } #endif /* CONFIG_TRAD_SIGNALS */ -asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +asmlinkage void sys_rt_sigreturn(void) { struct rt_sigframe __user *frame; + struct pt_regs *regs; sigset_t set; int sig; - frame = (struct rt_sigframe __user *) regs.regs[29]; + regs = current_pt_regs(); + frame = (struct rt_sigframe __user *) regs->regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) @@ -641,7 +688,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) set_current_blocked(&set); - sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); if (sig < 0) goto badframe; else if (sig) @@ -654,10 +701,10 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) * Don't let your children do this ... 
*/ __asm__ __volatile__( - "move\t$29, %0\n\t" + "move\t$sp, %0\n\t" "j\tsyscall_exit" :/* no outputs */ - :"r" (®s)); + :"r" (regs)); /* Unreached */ badframe: @@ -758,8 +805,10 @@ struct mips_abi mips_abi = { .setup_rt_frame = setup_rt_frame, .restart = __NR_restart_syscall, +#ifndef __MIPS_REDUCED_SIGCONTEXT .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs), .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr), +#endif /* __MIPS_REDUCED_SIGCONTEXT */ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math), .vdso = &vdso_image, @@ -780,25 +829,36 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) */ dsemul_thread_rollback(regs); - if (regs->regs[0]) { + if (regs->regs[0] && !is_syscall_success(regs)) { +#ifdef CONFIG_CPU_NANOMIPS + switch(-regs->regs[4]) { +#else switch(regs->regs[2]) { - case ERESTART_RESTARTBLOCK: - case ERESTARTNOHAND: - regs->regs[2] = EINTR; - break; +#endif case ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { + case ERESTART_RESTARTBLOCK: + /* FIXME horrible pile of hacks if no handler? */ + case ERESTARTNOHAND: +#ifdef CONFIG_CPU_NANOMIPS + regs->regs[4] = -EINTR; +#else regs->regs[2] = EINTR; +#endif break; } /* fallthrough */ case ERESTARTNOINTR: +#ifdef CONFIG_CPU_NANOMIPS + regs->regs[4] = regs->regs[26]; +#else regs->regs[7] = regs->regs[26]; regs->regs[2] = regs->regs[0]; +#endif regs->cp0_epc -= 4; } - regs->regs[0] = 0; /* Don't deal with this again. */ + regs->regs[0] = 0; /* Don't deal with this again. 
*/ } if (sig_uses_siginfo(&ksig->ka, abi)) @@ -821,19 +881,31 @@ static void do_signal(struct pt_regs *regs) return; } - if (regs->regs[0]) { - switch (regs->regs[2]) { + if (regs->regs[0] && !is_syscall_success(regs)) { +#ifdef CONFIG_CPU_NANOMIPS + switch(-regs->regs[4]) { +#else + switch(regs->regs[2]) { +#endif case ERESTARTNOHAND: case ERESTARTSYS: case ERESTARTNOINTR: +#ifdef CONFIG_CPU_NANOMIPS + regs->regs[4] = regs->regs[26]; +#else regs->regs[2] = regs->regs[0]; regs->regs[7] = regs->regs[26]; +#endif regs->cp0_epc -= 4; break; case ERESTART_RESTARTBLOCK: regs->regs[2] = current->thread.abi->restart; +#ifdef CONFIG_CPU_NANOMIPS + regs->regs[4] = regs->regs[26]; +#else regs->regs[7] = regs->regs[26]; +#endif regs->cp0_epc -= 4; break; } @@ -873,7 +945,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, user_enter(); } -#ifdef CONFIG_SMP +#if !defined(__MIPS_REDUCED_SIGCONTEXT) && defined(CONFIG_SMP) static int smp_save_fp_context(void __user *sc) { return raw_cpu_has_fpu @@ -887,7 +959,7 @@ static int smp_restore_fp_context(void __user *sc) ? 
restore_hw_fp_context(sc) : copy_fp_from_sigcontext(sc); } -#endif +#endif /* __MIPS_REDUCED_SIGCONTEXT && CONFIG_SMP */ static int signal_setup(void) { @@ -901,6 +973,7 @@ static int signal_setup(void) (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) - offsetof(struct rt_sigframe, rs_uc.uc_mcontext))); +#ifndef __MIPS_REDUCED_SIGCONTEXT #ifdef CONFIG_SMP /* For now just do the cpu_has_fpu check when the functions are invoked */ save_fp_context = smp_save_fp_context; @@ -914,6 +987,7 @@ static int signal_setup(void) restore_fp_context = copy_fp_from_sigcontext; } #endif /* CONFIG_SMP */ +#endif /* !__MIPS_REDUCED_SIGCONTEXT */ return 0; } diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index b672cebb4a1a..8f63497c585f 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -64,13 +64,15 @@ struct rt_sigframe_n32 { struct ucontextn32 rs_uc; }; -asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +asmlinkage void sysn32_rt_sigreturn(void) { struct rt_sigframe_n32 __user *frame; + struct pt_regs *regs; sigset_t set; int sig; - frame = (struct rt_sigframe_n32 __user *) regs.regs[29]; + regs = current_pt_regs(); + frame = (struct rt_sigframe_n32 __user *) regs->regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) @@ -78,7 +80,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) set_current_blocked(&set); - sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); + sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); if (sig < 0) goto badframe; else if (sig) @@ -94,7 +96,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) "move\t$29, %0\n\t" "j\tsyscall_exit" :/* no outputs */ - :"r" (®s)); + :"r" (regs)); /* Unreached */ badframe: diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index 2b3572fb5f1b..415655fa799d 100644 --- 
a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -151,13 +151,15 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig, return 0; } -asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) +asmlinkage void sys32_rt_sigreturn(void) { struct rt_sigframe32 __user *frame; + struct pt_regs *regs; sigset_t set; int sig; - frame = (struct rt_sigframe32 __user *) regs.regs[29]; + regs = current_pt_regs(); + frame = (struct rt_sigframe32 __user *) regs->regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) @@ -165,7 +167,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) set_current_blocked(&set); - sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); + sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext); if (sig < 0) goto badframe; else if (sig) @@ -181,7 +183,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) "move\t$29, %0\n\t" "j\tsyscall_exit" :/* no outputs */ - :"r" (®s)); + :"r" (regs)); /* Unreached */ badframe: @@ -251,13 +253,15 @@ struct mips_abi mips_abi_32 = { }; -asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) +asmlinkage void sys32_sigreturn(void) { struct sigframe32 __user *frame; + struct pt_regs *regs; sigset_t blocked; int sig; - frame = (struct sigframe32 __user *) regs.regs[29]; + regs = current_pt_regs(); + frame = (struct sigframe32 __user *) regs->regs[29]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) @@ -265,7 +269,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) set_current_blocked(&blocked); - sig = restore_sigcontext32(®s, &frame->sf_sc); + sig = restore_sigcontext32(regs, &frame->sf_sc); if (sig < 0) goto badframe; else if (sig) @@ -278,7 +282,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs 
regs) "move\t$29, %0\n\t" "j\tsyscall_exit" :/* no outputs */ - :"r" (®s)); + :"r" (regs)); /* Unreached */ badframe: diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index d79153cd2baf..a20e15e21947 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -161,7 +161,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) * s0 = kseg0 CCA */ entry_code = (u32 *)&mips_cps_core_entry; +#ifdef __nanomips__ + *entry_code++ = 0x9008d000 | cca; +#else uasm_i_addiu(&entry_code, 16, 0, cca); +#endif blast_dcache_range((unsigned long)&mips_cps_core_entry, (unsigned long)entry_code); bc_wback_inv((unsigned long)&mips_cps_core_entry, @@ -515,7 +519,8 @@ static void cps_init_secondary(void) if (cpu_has_mipsmt) dmt(); - if (mips_cm_revision() >= CM_REV_CM3) { + if ((mips_cm_revision() >= CM_REV_CM3) || + (mips_cm_revision() == CM_REV_CM2_6)) { unsigned int ident = read_gic_vl_ident(); /* diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index d6e6cf75114d..3a9921a12ec4 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -208,7 +208,6 @@ void spram_config(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: - case CPU_QEMU_GENERIC: case CPU_I6400: case CPU_P6600: config0 = read_c0_config(); diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index babf2dd165a0..1718179671f3 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c @@ -38,6 +38,7 @@ static void save_context_stack(struct stack_trace *trace, { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS + unsigned long fp = regs->regs[30]; unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; @@ -58,7 +59,7 @@ static void save_context_stack(struct stack_trace *trace, if (trace->nr_entries >= trace->max_entries) break; } - pc = unwind_stack(tsk, &sp, pc, &ra); + pc = unwind_stack(tsk, &sp, &fp, pc, &ra); } while (pc); #else save_raw_context_stack(trace, sp, savesched); 
@@ -86,6 +87,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) if (tsk != current) { regs->regs[29] = tsk->thread.reg29; + regs->regs[30] = tsk->thread.reg30; regs->regs[31] = 0; regs->cp0_epc = tsk->thread.reg31; } else diff --git a/arch/mips/kernel/syscall-nanomips.c b/arch/mips/kernel/syscall-nanomips.c new file mode 100644 index 000000000000..606a691ba657 --- /dev/null +++ b/arch/mips/kernel/syscall-nanomips.c @@ -0,0 +1,45 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2017 Imagination Technologies Ltd. + */ + +#include +#include +#include +#include + +/* + * clone needs to save callee saved registers so they are copied correctly to + * the child process context. + */ +asmlinkage long __sys_clone(unsigned long, unsigned long, int __user *, + unsigned long, int __user *); +save_static_function(sys_clone); +#define sys_clone __sys_clone + +SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) +{ + struct thread_info *ti = task_thread_info(current); + + ti->tp_value = addr; + if (cpu_has_userlocal) + write_c0_userlocal(addr); + + return 0; +} + +/* Provide the actual syscall number to call mapping. */ +#undef __SYSCALL +#define __SYSCALL(nr, call) [nr] = (call), + +/* + * Note that we can't include here since the header + * guard will defeat us; checks for __SYSCALL as well. + */ +const void *sys_call_table[__NR_syscalls] = { + [0 ... 
__NR_syscalls-1] = sys_ni_syscall, +#include +}; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0026b2a9ee6b..22e84d9a40d1 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -70,6 +70,7 @@ #include #include #include +#include extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); @@ -144,6 +145,7 @@ __setup("raw_show_trace", set_raw_show_trace); static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) { unsigned long sp = regs->regs[29]; + unsigned long fp = regs->regs[30]; unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; @@ -157,7 +159,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) printk("Call Trace:\n"); do { print_ip_sym(pc); - pc = unwind_stack(task, &sp, pc, &ra); + pc = unwind_stack(task, &sp, &fp, pc, &ra); } while (pc); pr_cont("\n"); } @@ -206,11 +208,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) regs.cp0_status = KSU_KERNEL; if (sp) { regs.regs[29] = (unsigned long)sp; + regs.regs[30] = 0; regs.regs[31] = 0; regs.cp0_epc = 0; } else { if (task && task != current) { regs.regs[29] = task->thread.reg29; + regs.regs[30] = task->thread.reg30; regs.regs[31] = 0; regs.cp0_epc = task->thread.reg31; #ifdef CONFIG_KGDB_KDB @@ -238,8 +242,9 @@ static void show_code(unsigned int __user *pc) printk("Code:"); - if ((unsigned long)pc & 1) - pc16 = (unsigned short __user *)((unsigned long)pc & ~1); + if (get_isa16_mode((unsigned long)pc)) + pc16 = (unsigned short __user *)msk_isa16_mode( + (unsigned long)pc); for(i = -3 ; i < 6 ; i++) { unsigned int insn; if (pc16 ? 
__get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { @@ -1015,7 +1020,18 @@ asmlinkage void do_bp(struct pt_regs *regs) if (__get_user(instr[0], (u16 __user *)epc)) goto out_sigsegv; - if (!cpu_has_mmips) { + if (cpu_has_nanomips) { + if (nanomips_insn_len(instr[0]) == 2) { + /* 16-bit nanoMIPS BREAK[16] */ + bcode = instr[0] & 0x7; + } else { + /* 32-bit nanoMIPS BREAK[32] */ + if (__get_user(instr[1], (u16 __user *)(epc + 2))) + goto out_sigsegv; + opcode = (instr[0] << 16) | instr[1]; + bcode = opcode & ((1 << 19) - 1); + } + } else if (!cpu_has_mmips) { /* MIPS16e mode */ bcode = (instr[0] >> 5) & 0x3f; } else if (mm_insn_16bit(instr[0])) { @@ -1106,10 +1122,15 @@ asmlinkage void do_tr(struct pt_regs *regs) if (__get_user(instr[0], (u16 __user *)(epc + 0)) || __get_user(instr[1], (u16 __user *)(epc + 2))) goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; - /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) - tcode = (opcode >> 12) & ((1 << 4) - 1); + + if (cpu_has_nanomips) { + tcode = (instr[1] >> 11) & ((1 << 5) - 1); + } else { + opcode = (instr[0] << 16) | instr[1]; + /* Immediate versions don't provide a code. 
*/ + if (!(opcode & OPCODE)) + tcode = (opcode >> 12) & ((1 << 4) - 1); + } } else { if (__get_user(opcode, (u32 __user *)epc)) goto out_sigsegv; @@ -1151,7 +1172,9 @@ asmlinkage void do_ri(struct pt_regs *regs) status = mipsr2_decoder(regs, opcode, &fcr31); switch (status) { case 0: +#ifdef SIGEMT case SIGEMT: +#endif return; case SIGILL: goto no_r2_instr; @@ -1178,6 +1201,8 @@ no_r2_instr: goto out; if (!get_isa16_mode(regs->cp0_epc)) { + WARN_ON(cpu_has_nanomips); + if (unlikely(get_user(opcode, epc) < 0)) status = SIGSEGV; @@ -1484,14 +1509,17 @@ asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) enum ctx_state prev_state; prev_state = exception_enter(); - current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; - if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, - current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) - goto out; - /* Clear MSACSR.Cause before enabling interrupts */ - write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); - local_irq_enable(); + if (IS_ENABLED(CONFIG_CPU_HAS_MSA)) { + current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; + if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, + current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) + goto out; + + /* Clear MSACSR.Cause before enabling interrupts */ + write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); + local_irq_enable(); + } die_if_kernel("do_msa_fpe invoked from kernel context!", regs); force_sig(SIGFPE, current); @@ -1595,6 +1623,11 @@ asmlinkage void do_mt(struct pt_regs *regs) { int subcode; + if (WARN_ON(!cpu_has_mipsmt)) { + force_sig(SIGILL, current); + return; + } + if (IS_ENABLED(CONFIG_MIPS_MT)) { subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) >> VPECONTROL_EXCPT_SHIFT; @@ -1741,6 +1774,7 @@ static inline void parity_protection_init(void) case CPU_P5600: case CPU_QEMU_GENERIC: case CPU_P6600: + case CPU_I7200: { unsigned long errctl; unsigned int l1parity_present, l2parity_present; @@ -1981,13 +2015,33 @@ void __init *set_except_vector(int n, void *addr) 
old_handler = xchg(&exception_handlers[n], handler); if (n == 0 && cpu_has_divec) { + unsigned int k0 = 26; +#ifdef CONFIG_CPU_NANOMIPS + u16 *buf = (u16 *)(ebase + 0x200); + long offs = handler - (ebase + 0x204); + /* FIXME convert to uasm of some sort */ + if (offs < (1l << 25) && offs >= -(1l << 25)) { + /* BC[32] handler */ + u32 insn32 = (0x28000000 | + (offs < 0) | + (offs & ((1l << 25) - 1))); + *(buf++) = insn32 >> 16; + *(buf++) = insn32 & 0xffff; + } else { + /* LI[48] k0, handler */ + *(buf++) = 0x6000 | k0 << 5; + *(buf++) = handler & 0xffff; + *(buf++) = handler >> 16; + /* JRC k0 */ + *(buf++) = 0xd800 | k0 << 5; + } +#else #ifdef CONFIG_CPU_MICROMIPS unsigned long jump_mask = ~((1 << 27) - 1); #else unsigned long jump_mask = ~((1 << 28) - 1); #endif u32 *buf = (u32 *)(ebase + 0x200); - unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { uasm_i_j(&buf, handler & ~jump_mask); uasm_i_nop(&buf); @@ -1996,6 +2050,7 @@ void __init *set_except_vector(int n, void *addr) uasm_i_jr(&buf, k0); uasm_i_nop(&buf); } +#endif local_flush_icache_range(ebase + 0x200, (unsigned long)buf); } return (void *)old_handler; @@ -2044,17 +2099,22 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * If no shadow set is selected then use the default handler * that does normal register saving and standard interrupt exit */ - extern char except_vec_vi, except_vec_vi_lui; - extern char except_vec_vi_ori, except_vec_vi_end; + extern char except_vec_vi, except_vec_vi_end; extern char rollback_except_vec_vi; char *vec_start = using_rollback_handler() ? 
&rollback_except_vec_vi : &except_vec_vi; -#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) +#if defined(CONFIG_CPU_NANOMIPS) + extern char except_vec_vi_li48; + const int li48_offset = &except_vec_vi_li48 - vec_start; +#else + extern char except_vec_vi_lui, except_vec_vi_ori; +# if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) const int lui_offset = &except_vec_vi_lui - vec_start + 2; const int ori_offset = &except_vec_vi_ori - vec_start + 2; -#else +# else const int lui_offset = &except_vec_vi_lui - vec_start; const int ori_offset = &except_vec_vi_ori - vec_start; +# endif #endif const int handler_len = &except_vec_vi_end - vec_start; @@ -2072,10 +2132,16 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) #else handler_len); #endif +#ifdef CONFIG_CPU_NANOMIPS + h = (u16 *)(b + li48_offset); + h[1] = handler; + h[2] = handler >> 16; +#else h = (u16 *)(b + lui_offset); *h = (handler >> 16) & 0xffff; h = (u16 *)(b + ori_offset); *h = (handler & 0xffff); +#endif local_flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); } @@ -2440,7 +2506,7 @@ void __init trap_init(void) set_except_vector(EXCCODE_SYS, handle_sys); set_except_vector(EXCCODE_BP, handle_bp); - if (rdhwr_noopt) + if (rdhwr_noopt || IS_ENABLED(CONFIG_CPU_NANOMIPS)) set_except_vector(EXCCODE_RI, handle_ri); else { if (cpu_has_vtag_icache) @@ -2533,8 +2599,12 @@ void mips_hwtrigger_info(const char *file, unsigned long line, struct pt_regs *regs, unsigned long code, const char *why) { + unsigned long way, idx, offset, addr; + unsigned int taglo; char *who; + preempt_disable(); + who = (regs && user_mode(regs)) ? 
current->comm : "kernel"; pr_info("%s:%lu: HWTRIGGER(%lu) for %s%s%s\n", @@ -2544,6 +2614,42 @@ void mips_hwtrigger_info(const char *file, unsigned long line, show_registers(regs); else dump_stack(); + + if (current_cpu_type() == CPU_I7200) { + pr_info("L1 data cache:\n"); + + for (way = 0; + way < current_cpu_data.dcache.ways; + way++) { + for (idx = 0; + idx < current_cpu_data.dcache.sets; + idx++) { + for (offset = 0; + offset < current_cpu_data.dcache.linesz; + offset += 4) { + addr = way << current_cpu_data.dcache.waybit; + addr |= idx * current_cpu_data.dcache.linesz; + addr |= offset; + __builtin_mips_cache(Index_Load_Tag_D, + (void *)CKSEG0ADDR(addr)); + back_to_back_c0_hazard(); + + if (!offset) { + taglo = read_c0_dtaglo(); + pr_info(" 0x%01lx.0x%03lx [0x%08x %c%c%c]", + way, idx, taglo, + (taglo & BIT(7)) ? 'V' : ' ', + (taglo & BIT(6)) ? 'D' : ' ', + (taglo & BIT(5)) ? 'L' : ' '); + } + + pr_cont(" 0x%08x", read_c0_ddatalo()); + } + } + } + } + + preempt_enable(); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 2d0b912f9e3e..5ac6dd71b8b9 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -112,9 +112,9 @@ extern void show_registers(struct pt_regs *regs); do { \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"type##_lb("%0", "0(%2)")"\n" \ - "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ + "2:\t"type##_lbu("$at", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ "3:\t.set\tat\n\t" \ ".insn\n\t" \ @@ -159,15 +159,15 @@ do { \ ".set\tpush\n" \ ".set\tnoat\n\t" \ "1:"type##_lb("%0", "0(%2)")"\n\t" \ - "2:"type##_lbu("$1", "1(%2)")"\n\t" \ + "2:"type##_lbu("$at", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:"type##_lbu("$1", "2(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "3:"type##_lbu("$at", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:"type##_lbu("$1", "3(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + 
"4:"type##_lbu("$at", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n" \ "10:\n\t" \ @@ -193,9 +193,9 @@ do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_lbu("%0", "0(%2)")"\n" \ - "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ + "2:\t"type##_lbu("$at", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -263,15 +263,15 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:"type##_lbu("%0", "0(%2)")"\n\t" \ - "2:"type##_lbu("$1", "1(%2)")"\n\t" \ + "2:"type##_lbu("$at", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:"type##_lbu("$1", "2(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "3:"type##_lbu("$at", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:"type##_lbu("$1", "3(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "4:"type##_lbu("$at", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n" \ "10:\n\t" \ @@ -296,27 +296,27 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:lb\t%0, 0(%2)\n\t" \ - "2:lbu\t $1, 1(%2)\n\t" \ + "2:lbu\t $at, 1(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:lbu\t$1, 2(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "3:lbu\t$at, 2(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:lbu\t$1, 3(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "4:lbu\t$at, 3(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "5:lbu\t$1, 4(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "5:lbu\t$at, 4(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "6:lbu\t$1, 5(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "6:lbu\t$at, 5(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "7:lbu\t$1, 6(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "7:lbu\t$at, 6(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "8:lbu\t$1, 7(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "8:lbu\t$at, 7(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n\t" \ 
"10:\n\t" \ @@ -347,8 +347,8 @@ do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_sb("%1", "1(%2)")"\n" \ - "srl\t$1, %1, 0x8\n" \ - "2:\t"type##_sb("$1", "0(%2)")"\n" \ + "srl\t$at, %1, 0x8\n" \ + "2:\t"type##_sb("$at", "0(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -414,12 +414,12 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:"type##_sb("%1", "3(%2)")"\n\t" \ - "srl\t$1, %1, 0x8\n\t" \ - "2:"type##_sb("$1", "2(%2)")"\n\t" \ - "srl\t$1, $1, 0x8\n\t" \ - "3:"type##_sb("$1", "1(%2)")"\n\t" \ - "srl\t$1, $1, 0x8\n\t" \ - "4:"type##_sb("$1", "0(%2)")"\n\t" \ + "srl\t$at, %1, 0x8\n\t" \ + "2:"type##_sb("$at", "2(%2)")"\n\t" \ + "srl\t$at, $at, 0x8\n\t" \ + "3:"type##_sb("$at", "1(%2)")"\n\t" \ + "srl\t$at, $at, 0x8\n\t" \ + "4:"type##_sb("$at", "0(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -445,21 +445,21 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:sb\t%1, 7(%2)\n\t" \ - "dsrl\t$1, %1, 0x8\n\t" \ - "2:sb\t$1, 6(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "3:sb\t$1, 5(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "4:sb\t$1, 4(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "5:sb\t$1, 3(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "6:sb\t$1, 2(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "7:sb\t$1, 1(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "8:sb\t$1, 0(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ + "dsrl\t$at, %1, 0x8\n\t" \ + "2:sb\t$at, 6(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "3:sb\t$at, 5(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "4:sb\t$at, 4(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "5:sb\t$at, 3(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "6:sb\t$at, 2(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "7:sb\t$at, 1(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "8:sb\t$at, 0(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -491,9 +491,9 @@ do { \ do { \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"type##_lb("%0", "1(%2)")"\n" \ - "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ + "2:\t"type##_lbu("$at", 
"0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ "3:\t.set\tat\n\t" \ ".insn\n\t" \ @@ -538,15 +538,15 @@ do { \ ".set\tpush\n" \ ".set\tnoat\n\t" \ "1:"type##_lb("%0", "3(%2)")"\n\t" \ - "2:"type##_lbu("$1", "2(%2)")"\n\t" \ + "2:"type##_lbu("$at", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:"type##_lbu("$1", "1(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "3:"type##_lbu("$at", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:"type##_lbu("$1", "0(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "4:"type##_lbu("$at", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n" \ "10:\n\t" \ @@ -573,9 +573,9 @@ do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_lbu("%0", "1(%2)")"\n" \ - "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ + "2:\t"type##_lbu("$at", "0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -643,15 +643,15 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:"type##_lbu("%0", "3(%2)")"\n\t" \ - "2:"type##_lbu("$1", "2(%2)")"\n\t" \ + "2:"type##_lbu("$at", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:"type##_lbu("$1", "1(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "3:"type##_lbu("$at", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:"type##_lbu("$1", "0(%2)")"\n\t" \ + "or\t%0, $at\n\t" \ + "4:"type##_lbu("$at", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n" \ "10:\n\t" \ @@ -676,27 +676,27 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:lb\t%0, 7(%2)\n\t" \ - "2:lbu\t$1, 6(%2)\n\t" \ + "2:lbu\t$at, 6(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "3:lbu\t$1, 5(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "3:lbu\t$at, 5(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "4:lbu\t$1, 4(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "4:lbu\t$at, 4(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ 
- "or\t%0, $1\n\t" \ - "5:lbu\t$1, 3(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "5:lbu\t$at, 3(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "6:lbu\t$1, 2(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "6:lbu\t$at, 2(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "7:lbu\t$1, 1(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "7:lbu\t$at, 1(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ - "8:lbu\t$1, 0(%2)\n\t" \ + "or\t%0, $at\n\t" \ + "8:lbu\t$at, 0(%2)\n\t" \ "dsll\t%0, 0x8\n\t" \ - "or\t%0, $1\n\t" \ + "or\t%0, $at\n\t" \ "li\t%1, 0\n" \ ".set\tpop\n\t" \ "10:\n\t" \ @@ -725,8 +725,8 @@ do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ "1:\t"type##_sb("%1", "0(%2)")"\n" \ - "srl\t$1,%1, 0x8\n" \ - "2:\t"type##_sb("$1", "1(%2)")"\n" \ + "srl\t$at,%1, 0x8\n" \ + "2:\t"type##_sb("$at", "1(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -792,12 +792,12 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:"type##_sb("%1", "0(%2)")"\n\t" \ - "srl\t$1, %1, 0x8\n\t" \ - "2:"type##_sb("$1", "1(%2)")"\n\t" \ - "srl\t$1, $1, 0x8\n\t" \ - "3:"type##_sb("$1", "2(%2)")"\n\t" \ - "srl\t$1, $1, 0x8\n\t" \ - "4:"type##_sb("$1", "3(%2)")"\n\t" \ + "srl\t$at, %1, 0x8\n\t" \ + "2:"type##_sb("$at", "1(%2)")"\n\t" \ + "srl\t$at, $at, 0x8\n\t" \ + "3:"type##_sb("$at", "2(%2)")"\n\t" \ + "srl\t$at, $at, 0x8\n\t" \ + "4:"type##_sb("$at", "3(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -823,21 +823,21 @@ do { \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ "1:sb\t%1, 0(%2)\n\t" \ - "dsrl\t$1, %1, 0x8\n\t" \ - "2:sb\t$1, 1(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "3:sb\t$1, 2(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "4:sb\t$1, 3(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "5:sb\t$1, 4(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "6:sb\t$1, 5(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "7:sb\t$1, 6(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ - "8:sb\t$1, 7(%2)\n\t" \ - "dsrl\t$1, $1, 0x8\n\t" \ + "dsrl\t$at, %1, 0x8\n\t" \ + "2:sb\t$at, 1(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "3:sb\t$at, 
2(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "4:sb\t$at, 3(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "5:sb\t$at, 4(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "6:sb\t$at, 5(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "7:sb\t$at, 6(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ + "8:sb\t$at, 7(%2)\n\t" \ + "dsrl\t$at, $at, 0x8\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -2281,8 +2281,304 @@ sigill: force_sig(SIGILL, current); } +static unsigned int nanomips_dec_gpr3(unsigned int r3) +{ + if (r3 < 4) + return 16 + r3; + return r3; +} + +static unsigned int nanomips_dec_gpr3_src_store(unsigned int r3) +{ + return r3 ? nanomips_dec_gpr3(r3) : 0; +} + +static unsigned int nanomips_dec_gpr4(unsigned int r4) +{ + if (r4 < 4 || r4 >= 8) + return 8 + r4; + return r4; +} + +static unsigned int nanomips_dec_gpr4_zero(unsigned int r4) +{ + return (r4 == 3) ? 0 : nanomips_dec_gpr4(r4); +} + +static void emulate_load_store_nanoMIPS(struct pt_regs *regs, void __user *addr) +{ + unsigned int rt, insn_len; + u16 insn[3], __user *epc; + int i, err; + union { + s32 s32; + u16 u16; + s16 s16; + } data; + char insn_str[(ARRAY_SIZE(insn) * 5) + 1]; + + /* Read the first half-word of the instruction */ + epc = (u16 __user *)regs->cp0_epc; + if (__get_user(insn[0], epc)) + goto fault; + + /* Decode length from the first half-word, read the whole instruction */ + insn_len = nanomips_insn_len(insn[0]); + for (i = 1; i < insn_len / sizeof(insn[0]); i++) { + if (__get_user(insn[i], &epc[i])) + goto fault; + } + + /* Default to the standard 32 bit rt encoding */ + rt = (insn[0] >> 5) & 0x1f; + + switch (insn[0] >> 10) { + case 0x05: /* LW[16] */ + rt = nanomips_dec_gpr3((insn[0] >> 7) & 0x7); + goto lw; + case 0x08: /* P32A */ + switch (insn[1] & 0x7) { + case 0x7: /* _POOL32A7 */ + switch ((insn[1] >> 3) & 0x7) { + case 0x0: /* P.LSX */ + rt = insn[1] >> 11; + switch ((insn[1] >> 6) & 0x1) { + case 0x0: /* PP.LSX */ + switch ((insn[1] >> 7) & 0xf) { + case 0x4: /* LHX */ + goto lh; + 
case 0x5: /* SHX */ + goto sh; + case 0x6: /* LHUX */ + goto lhu; + case 0x8: /* LWX */ + goto lw; + case 0x9: /* SWX */ + goto sw; + default: + goto sigill; + } + case 0x1: /* PP.LSXS */ + switch ((insn[1] >> 7) & 0xf) { + case 0x4: /* LHXS */ + goto lh; + case 0x5: /* SHXS */ + goto sh; + case 0x6: /* LHUXS */ + goto lhu; + case 0x8: /* LWXS[32] */ + goto lw; + case 0x9: /* SWXS */ + goto sw; + default: + goto sigill; + } + } + default: + goto sigill; + } + default: + goto sigill; + } + case 0x0d: /* LW[SP] */ + goto lw; + case 0x10: /* P.GP.W */ + switch (insn[1] & 0x3) { + case 0x2: /* LW[GP] */ + goto lw; + case 0x3: /* SW[GP] */ + goto sw; + default: + goto sigill; + } + case 0x11: /* P.GP.BH */ + switch ((insn[0] >> 2) & 0x7) { + case 0x4: /* P.GP.LH */ + switch (insn[1] & 0x1) { + case 0x0: /* LH[GP] */ + goto lh; + case 0x1: /* LHU[GP] */ + goto lhu; + } + case 0x5: /* P.GP.SH */ + switch (insn[1] & 0x1) { + case 0x0: /* SH[GP] */ + goto sh; + default: + goto sigill; + } + default: + goto sigill; + } + case 0x14: /* P16C */ + switch (insn[0] & 0x1) { + case 0x1: /* LWXS[16] */ + rt = nanomips_dec_gpr3((insn[0] >> 1) & 0x7); + goto lw; + default: + goto sigill; + } + case 0x15: /* LW[GP16] */ + rt = nanomips_dec_gpr3((insn[0] >> 7) & 0x7); + goto lw; + case 0x18: /* POOL48I */ + switch (insn[0] & 0x1f) { + case 0x0b: /* LWPC[48] */ + goto lw; + case 0x0f: /* SWPC[48] */ + goto sw; + default: + goto sigill; + } + case 0x1d: /* LW[4X4] */ + rt = nanomips_dec_gpr4(((insn[0] >> 6) & 0x8) | + ((insn[0] >> 5) & 0x7)); + goto lw; + case 0x1f: /* P16.LH */ + rt = nanomips_dec_gpr3((insn[0] >> 7) & 0x7); + switch (((insn[0] >> 2) & 0x2) | (insn[0] & 0x1)) { + case 0x0: /* LH[16] */ + goto lh; + case 0x1: /* SH[16] */ + goto sh; + case 0x2: /* LHU[16] */ + goto lhu; + default: + goto sigill; + } + case 0x21: /* P.LS.U12 */ + switch (insn[1] >> 12) { + case 0x4: /* LH[U12] */ + goto lh; + case 0x5: /* SH[U12] */ + goto sh; + case 0x6: /* LHU[U12] */ + goto lhu; + case 
0x8: /* LW[U12] */ + goto lw; + case 0x9: /* SW[U12] */ + goto sw; + default: + goto sigill; + } + case 0x25: /* SW[16] */ + rt = nanomips_dec_gpr3_src_store((insn[0] >> 7) & 0x7); + goto sw; + case 0x29: /* P.LS.S9 */ + switch ((insn[1] >> 8) & 0x7) { + case 0x0: /* P.LS.S0 */ + switch ((insn[1] >> 11) & 0xf) { + case 0x4: /* LH[S9] */ + goto lh; + case 0x5: /* SH[S9] */ + goto sh; + case 0x6: /* LHU[S9] */ + goto lhu; + case 0x8: /* LW[S9] */ + goto lw; + case 0x9: /* SW[S9] */ + goto sw; + default: + goto sigill; + } + default: + goto sigill; + } + case 0x2d: /* SW[SP] */ + goto sw; + case 0x35: /* SW[GP16] */ + rt = nanomips_dec_gpr3_src_store((insn[0] >> 7) & 0x7); + goto sw; + case 0x3d: /* SW[4X4] */ + rt = nanomips_dec_gpr4_zero(((insn[0] >> 6) & 0x8) | + ((insn[0] >> 5) & 0x7)); + goto sw; + default: + goto sigill; + } + + /* + * The switch statements above should always jump to one of the labels + * below, so we should never fall through to here. If we do then + * something is wrong so warn about it. 
+ */ + for (i = 0; i < insn_len / sizeof(insn[0]); i++) + sprintf(&insn_str[i * 5], "%04x ", insn[i]); + insn_str[(i * 5) - 1] = 0; + WARN(1, "Bad case in %s, insn %s\n", __func__, insn_str); + goto sigill; + +lh: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + LoadHW(addr, data.s16, err); + if (err) + goto fault; + regs->regs[rt] = (long)data.s16; + goto done; + +lhu: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + LoadHWU(addr, data.u16, err); + if (err) + goto fault; + regs->regs[rt] = data.u16; + goto done; + +lw: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + LoadW(addr, data.s32, err); + if (err) + goto fault; + regs->regs[rt] = (long)data.s32; + goto done; + +sh: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + data.s16 = regs->regs[rt]; + StoreHW(addr, data.s16, err); + if (err) + goto fault; + goto done; + +sw: + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + data.s32 = regs->regs[rt]; + StoreW(addr, data.s32, err); + if (err) + goto fault; + goto done; + +done: + regs->cp0_epc += insn_len; +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + return; + +fault: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + return; + +sigill: + die_if_kernel("Unhandled kernel unaligned access or invalid instruction", + regs); + force_sig(SIGILL, current); +} + asmlinkage void do_ade(struct pt_regs *regs) { + void (*emu_func)(struct pt_regs *regs, void __user *addr); enum ctx_state prev_state; unsigned int __user *pc; mm_segment_t seg; @@ -2319,27 +2615,23 @@ asmlinkage void do_ade(struct pt_regs *regs) if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); - if (cpu_has_mmips) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); - emulate_load_store_microMIPS(regs, - (void __user *)regs->cp0_badvaddr); - set_fs(seg); + if (cpu_has_nanomips) 
+ emu_func = emulate_load_store_nanoMIPS; + else if (cpu_has_mmips) + emu_func = emulate_load_store_microMIPS; + else if (cpu_has_mips16) + emu_func = emulate_load_store_MIPS16e; + else + emu_func = NULL; - return; - } - - if (cpu_has_mips16) { + if (emu_func) { seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); - emulate_load_store_MIPS16e(regs, - (void __user *)regs->cp0_badvaddr); + emu_func(regs, (void __user *)regs->cp0_badvaddr); set_fs(seg); - return; - } + } goto sigbus; } diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index f7a0645ccb82..e8c37a7e24a8 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -27,6 +27,15 @@ static inline int insn_has_delay_slot(const union mips_instruction insn) int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr) { +#ifdef __nanomips__ + if (addr & 0x1) + return -EINVAL; + + aup->ixol[0] = aup->insn[0]; + aup->ixol[1] = UPROBE_XOLBREAK_INSN; + + return 0; +#else union mips_instruction inst; /* @@ -44,9 +53,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *aup, } aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)]; - aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ + aup->ixol[1] = UPROBE_XOLBREAK_INSN; return 0; +#endif } /** @@ -61,6 +71,19 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *aup, */ bool is_trap_insn(uprobe_opcode_t *insn) { +#ifdef __nanomips__ + switch (insn->h[0] >> 10) { + case 0x00: /* P.ADDIU (BREAK[32]) */ + case 0x04: /* P16.MV (BREAK[16]) */ + return ((insn->h[0] & GENMASK(9, 3)) >> 3) == 0x2; + + case 0x08: /* P32A (TEQ & TNE) */ + return (insn->h[1] & GENMASK(9, 0)) == 0; + + default: + return false; + } +#else union mips_instruction inst; inst.word = *insn; @@ -93,6 +116,7 @@ bool is_trap_insn(uprobe_opcode_t *insn) } return 0; +#endif } #define UPROBE_TRAP_NR ULONG_MAX @@ -110,6 +134,9 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) * Now find the EPC where to resume after 
the breakpoint has been * dealt with. This may require emulation of a branch. */ +#ifdef __nanomips__ + aup->resume_epc = regs->cp0_epc + nanomips_insn_len(aup->insn[0].h[0]); +#else aup->resume_epc = regs->cp0_epc + 4; if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { unsigned long epc; @@ -119,6 +146,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; } +#endif utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; regs->cp0_epc = current->utask->xol_vaddr; @@ -231,6 +259,19 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { unsigned long kaddr, kstart; +#ifdef __nanomips__ + u16 buf[sizeof(((struct arch_uprobe *)NULL)->ixol)]; + uprobe_opcode_t *ops = src; + unsigned int d, i, j; + + for (d = i = 0; i < ARRAY_SIZE(((struct arch_uprobe *)NULL)->ixol); i++) { + for (j = 0; j < (nanomips_insn_len(ops[i].h[0]) / 2); j++) + buf[d++] = ops[i].h[j]; + } + + src = buf; + len = d * sizeof(buf[0]); +#endif /* __nanomips__ */ /* Initialize the slot */ kaddr = (unsigned long)kmap_atomic(page); @@ -261,5 +302,36 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) */ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) { - return 0; +#ifdef __nanomips__ + uprobe_opcode_t *i = &auprobe->insn[0]; + unsigned long next_pc; + long offset; + + pr_debug("%s: %0*llx\n", __func__, + nanomips_insn_len(i->h[0]) * 2, + ((u64)i->h[0] << ((nanomips_insn_len(i->h[0]) - 2) * 8)) | + ((nanomips_insn_len(i->h[0]) >= 4) ? + ((u64)i->h[1] << ((nanomips_insn_len(i->h[0]) - 4) * 8)) : 0) | + ((nanomips_insn_len(i->h[0]) >= 6) ? 
+ ((u64)i->h[2] << ((nanomips_insn_len(i->h[0]) - 6) * 8)) : 0)); + + next_pc = regs->cp0_epc + nanomips_insn_len(i->h[0]); + + switch (i->h[0] >> 10) { + case 0x0a: /* P.BAL */ + offset = (u32)(i->h[0] & GENMASK(8, 0)) << 16; + offset |= i->h[1] & GENMASK(15, 1); + offset |= (u32)(i->h[1] & BIT(0)) << 25; + offset = sign_extend64(offset, 25); + + if (i->h[0] & BIT(9)) + regs->regs[31] = next_pc; + regs->cp0_epc = next_pc + offset; + return true; + + default: + return false; + } +#endif + return false; } diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index 19fcab7348b1..912590c8abb9 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -18,27 +18,33 @@ void mips_install_watch_registers(struct task_struct *t) { struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264; + unsigned int watchhi = MIPS_WATCHHI_G | /* Trap all ASIDs */ + MIPS_WATCHHI_IRW; /* Clear result bits */ + unsigned int vpe; + + if (boot_cpu_type() == CPU_I7200) { + vpe = cpu_vpe_id(¤t_cpu_data); + watchhi = MIPS_WATCHHI_U | /* Match user mode */ + MIPS_WATCHHI_MTEN_ID_VPE | /* Match all TCs */ + (vpe << MIPS_WATCHHI_MTEN_ID_S) | /* of this VPE */ + MIPS_WATCHHI_IRW_RSLT; /* Clear result bits */ + } + switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: write_c0_watchlo3(watches->watchlo[3]); - /* Write 1 to the I, R, and W bits to clear them, and - 1 to G so all ASIDs are trapped. 
*/ - write_c0_watchhi3(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW | - watches->watchhi[3]); + write_c0_watchhi3(watchhi | watches->watchhi[3]); case 3: write_c0_watchlo2(watches->watchlo[2]); - write_c0_watchhi2(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW | - watches->watchhi[2]); + write_c0_watchhi2(watchhi | watches->watchhi[2]); case 2: write_c0_watchlo1(watches->watchlo[1]); - write_c0_watchhi1(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW | - watches->watchhi[1]); + write_c0_watchhi1(watchhi | watches->watchhi[1]); case 1: write_c0_watchlo0(watches->watchlo[0]); - write_c0_watchhi0(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW | - watches->watchhi[0]); + write_c0_watchhi0(watchhi | watches->watchhi[0]); } } @@ -51,21 +57,24 @@ void mips_read_watch_registers(void) { struct mips3264_watch_reg_state *watches = ¤t->thread.watch.mips3264; + unsigned int watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW; + + if (boot_cpu_type() == CPU_I7200) { + /* Return only result & enable bits to userspace */ + watchhi_mask = MIPS_WATCHHI_IRW_RSLT | MIPS_WATCHHI_IRW; + } + switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: - watches->watchhi[3] = (read_c0_watchhi3() & - (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW)); + watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask); case 3: - watches->watchhi[2] = (read_c0_watchhi2() & - (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW)); + watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask); case 2: - watches->watchhi[1] = (read_c0_watchhi1() & - (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW)); + watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask); case 1: - watches->watchhi[0] = (read_c0_watchhi0() & - (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW)); + watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask); } if (current_cpu_data.watch_reg_use_cnt == 1 && (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) { @@ -90,27 +99,119 @@ void mips_clear_watch_registers(void) BUG(); case 8: write_c0_watchlo7(0); + write_c0_watchhi7(0); case 7: write_c0_watchlo6(0); + 
write_c0_watchhi6(0); case 6: write_c0_watchlo5(0); + write_c0_watchhi5(0); case 5: write_c0_watchlo4(0); + write_c0_watchhi4(0); case 4: write_c0_watchlo3(0); + write_c0_watchhi3(0); case 3: write_c0_watchlo2(0); + write_c0_watchhi2(0); case 2: write_c0_watchlo1(0); + write_c0_watchhi1(0); case 1: write_c0_watchlo0(0); + write_c0_watchhi0(0); + } +} + + +void mips_probe_i7200_watch_registers(struct cpuinfo_mips *c) +{ + unsigned int t; + + /* All capability bits are in watchhi - set and read back */ + write_c0_watchhi0(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW); + back_to_back_c0_hazard(); + t = read_c0_watchhi0(); + write_c0_watchhi0(0); + + if (!(t & MIPS_WATCHHI_IRW)) { + /* No watch registers */ + return; } + + /* Config1.WR = 0, but watch registers are indeed present */ + c->options |= MIPS_CPU_WATCH; + + c->watch_reg_masks[0] = t; + c->watch_reg_count = 1; + c->watch_reg_use_cnt = 1; + if ((t & MIPS_WATCHHI_M) == 0) + return; + + write_c0_watchhi1(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW); + back_to_back_c0_hazard(); + t = read_c0_watchhi1(); + write_c0_watchhi1(0); + + c->watch_reg_masks[1] = t; + c->watch_reg_count = 2; + c->watch_reg_use_cnt = 2; + if ((t & MIPS_WATCHHI_M) == 0) + return; + + write_c0_watchhi2(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW); + back_to_back_c0_hazard(); + t = read_c0_watchhi2(); + write_c0_watchhi2(0); + + c->watch_reg_masks[2] = t; + c->watch_reg_count = 3; + c->watch_reg_use_cnt = 3; + if ((t & MIPS_WATCHHI_M) == 0) + return; + + write_c0_watchhi3(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW); + back_to_back_c0_hazard(); + t = read_c0_watchhi3(); + write_c0_watchhi3(0); + + c->watch_reg_masks[3] = t; + c->watch_reg_count = 4; + c->watch_reg_use_cnt = 4; + if ((t & MIPS_WATCHHI_M) == 0) + return; + + /* We use at most 4, but probe and report up to 8. 
*/ + c->watch_reg_count = 5; + t = read_c0_watchhi4(); + if ((t & MIPS_WATCHHI_M) == 0) + return; + + c->watch_reg_count = 6; + t = read_c0_watchhi5(); + if ((t & MIPS_WATCHHI_M) == 0) + return; + + c->watch_reg_count = 7; + t = read_c0_watchhi6(); + if ((t & MIPS_WATCHHI_M) == 0) + return; + + c->watch_reg_count = 8; } + void mips_probe_watch_registers(struct cpuinfo_mips *c) { unsigned int t; + switch (boot_cpu_type()) { + case CPU_I7200: + /* I7200 has non-standard watch registers */ + return mips_probe_i7200_watch_registers(c); + } + if ((c->options & MIPS_CPU_WATCH) == 0) return; /* diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index e84e12655fa8..a105b54fb13e 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -3,9 +3,16 @@ # Makefile for MIPS-specific library files.. # -lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ - mips-atomic.o strncpy_user.o \ - strnlen_user.o uncached.o +lib-y += bitops.o +lib-y += csum_partial.o +lib-y += delay.o +lib-y += memcpy.o +lib-y += memmove.o +lib-y += memset.o +lib-y += mips-atomic.o +lib-y += strncpy_user.o +lib-y += strnlen_user.o +lib-y += uncached.o obj-y += iomap.o iomap_copy.o obj-$(CONFIG_PCI) += iomap-pci.o diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 68c495ed71e3..e0477aae6342 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -26,14 +26,8 @@ void __delay(unsigned long loops) { - __asm__ __volatile__ ( - " .set noreorder \n" - " .align 3 \n" - "1: bnez %0, 1b \n" - " " __stringify(LONG_SUBU) " %0, %1 \n" - " .set reorder \n" - : "=r" (loops) - : GCC_DADDI_IMM_ASM() (1), "0" (loops)); + while (loops--) + asm volatile(""); } EXPORT_SYMBOL(__delay); diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 5a2cf36dfba6..ee176b7ed71e 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Unified implementation of memcpy, memmove and the __copy_user backend. + * Unified implementation of memcpy and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. @@ -36,8 +36,8 @@ #include #include #include +#include -#define dst a0 #define src a1 #define len a2 @@ -59,8 +59,6 @@ * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) - * __copy_user uses a non-standard calling convention; see - * include/asm-mips/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent @@ -73,21 +71,18 @@ /* * The exception handler for loads requires that: - * 1- AT contain the address of the byte just past the end of the source + * 1- a3 contain the address of the byte just past the end of the source * of the copy, - * 2- src_entry <= src < AT, and + * 2- src_entry <= src < a3, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * - * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user + * (1) is set up up by uaccess.h and maintained by not writing a3 in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. - * - * For __rmemcpy and memmove an exception is always a kernel bug, therefore - * they're not protected. 
*/ /* Instruction type */ @@ -98,6 +93,7 @@ #define DST_PREFETCH 2 #define LEGACY_MODE 1 #define EVA_MODE 2 +#define MEMCPY_MODE 3 #define USEROP 1 #define KERNELOP 2 @@ -113,7 +109,9 @@ */ #define EXC(insn, type, reg, addr, handler) \ - .if \mode == LEGACY_MODE; \ + .if \mode == MEMCPY_MODE; \ + insn reg, addr; \ + .elseif \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ @@ -162,24 +160,6 @@ #define NBYTES 8 #define LOG_NBYTES 3 -/* - * As we are sharing code base with the mips32 tree (which use the o32 ABI - * register definitions). We need to redefine the register definitions from - * the n64 ABI register naming to the o32 ABI register naming. - */ -#undef t0 -#undef t1 -#undef t2 -#undef t3 -#define t0 $8 -#define t1 $9 -#define t2 $10 -#define t3 $11 -#define t4 $12 -#define t5 $13 -#define t6 $14 -#define t7 $15 - #else #define LOADK lw /* No exception */ @@ -205,7 +185,7 @@ #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) #define _PREF(hint, addr, type) \ - .if \mode == LEGACY_MODE; \ + .if \mode != EVA_MODE; \ PREF(hint, addr); \ .else; \ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \ @@ -217,9 +197,7 @@ * register should remain intact because it's \ * used later on. Therefore use $v1. \ */ \ - .set at=v1; \ PREFE(hint, addr); \ - .set noat; \ .else; \ PREF(hint, addr); \ .endif; \ @@ -249,35 +227,23 @@ #define ADDRMASK (NBYTES-1) .text - .set noreorder -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS - .set noat -#else - .set at=v1 -#endif .align 5 /* * Macro to build the __copy_user common code * Arguments: - * mode : LEGACY_MODE or EVA_MODE + * mode : LEGACY_MODE, EVA_MODE or MEMCPY_MODE * from : Source operand. USEROP or KERNELOP * to : Destination operand. 
USEROP or KERNELOP */ - .macro __BUILD_COPY_USER mode, from, to - - /* initialize __memcpy if this the first time we execute this macro */ - .ifnotdef __memcpy - .set __memcpy, 1 - .hidden __memcpy /* make sure it does not leak */ - .endif + .macro __BUILD_COPY_USER mode, from, to, dst, uncopied /* * Note: dst & src may be unaligned, len may be 0 * Temps */ -#define rem t8 +#define rem ta2 R10KCBARRIER(0(ra)) /* @@ -288,34 +254,31 @@ * If len < NBYTES use byte operations. */ PREFS( 0, 0(src) ) - PREFD( 1, 0(dst) ) + PREFD( 1, 0(\dst) ) sltu t2, len, NBYTES - and t1, dst, ADDRMASK + and t1, \dst, ADDRMASK PREFS( 0, 1*32(src) ) - PREFD( 1, 1*32(dst) ) + PREFD( 1, 1*32(\dst) ) + and t0, src, ADDRMASK bnez t2, .Lcopy_bytes_checklen\@ - and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) - PREFD( 1, 2*32(dst) ) + PREFD( 1, 2*32(\dst) ) #ifndef CONFIG_CPU_MIPSR6 bnez t1, .Ldst_unaligned\@ - nop bnez t0, .Lsrc_unaligned_dst_aligned\@ #else or t0, t0, t1 bnez t0, .Lcopy_unaligned_bytes\@ #endif /* - * use delay slot for fall-through * src and dst are aligned; need to compute rem */ .Lboth_aligned\@: - SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES - and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) PREFS( 0, 3*32(src) ) - PREFD( 1, 3*32(dst) ) - .align 4 + PREFD( 1, 3*32(\dst) ) 1: R10KCBARRIER(0(ra)) LOAD(t0, UNIT(0)(src), .Ll_exc\@) @@ -323,33 +286,32 @@ LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 8*NBYTES - LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) - LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@) - STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@) - STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@) + LOAD(ta0, UNIT(4)(src), .Ll_exc_copy\@) + LOAD(ta1, UNIT(5)(src), .Ll_exc_copy\@) + STORE(t0, UNIT(0)(\dst), .Ls_exc_p8u\@) + STORE(t1, UNIT(1)(\dst), .Ls_exc_p7u\@) LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@) 
LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@) ADD src, src, 8*NBYTES - ADD dst, dst, 8*NBYTES - STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@) - STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@) - STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@) - STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@) - STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@) - STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@) + ADD \dst, \dst, 8*NBYTES + STORE(t2, UNIT(-6)(\dst), .Ls_exc_p6u\@) + STORE(t3, UNIT(-5)(\dst), .Ls_exc_p5u\@) + STORE(ta0, UNIT(-4)(\dst), .Ls_exc_p4u\@) + STORE(ta1, UNIT(-3)(\dst), .Ls_exc_p3u\@) + STORE(t0, UNIT(-2)(\dst), .Ls_exc_p2u\@) + STORE(t1, UNIT(-1)(\dst), .Ls_exc_p1u\@) PREFS( 0, 8*32(src) ) - PREFD( 1, 8*32(dst) ) + PREFD( 1, 8*32(\dst) ) bne len, rem, 1b - nop /* * len == rem == the number of bytes left to copy < 8*NBYTES */ .Lcleanup_both_aligned\@: + sltu t0, len, 4*NBYTES beqz len, .Ldone\@ - sltu t0, len, 4*NBYTES + and rem, len, (NBYTES-1) # rem = len % NBYTES bnez t0, .Lless_than_4units\@ - and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ @@ -360,30 +322,25 @@ SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES R10KCBARRIER(0(ra)) - STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) - STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) - STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) - STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) - .set reorder /* DADDI_WAR */ - ADD dst, dst, 4*NBYTES + STORE(t0, UNIT(0)(\dst), .Ls_exc_p4u\@) + STORE(t1, UNIT(1)(\dst), .Ls_exc_p3u\@) + STORE(t2, UNIT(2)(\dst), .Ls_exc_p2u\@) + STORE(t3, UNIT(3)(\dst), .Ls_exc_p1u\@) + ADD \dst, \dst, 4*NBYTES beqz len, .Ldone\@ - .set noreorder .Lless_than_4units\@: /* * rem = len % NBYTES */ beq rem, len, .Lcopy_bytes\@ - nop 1: R10KCBARRIER(0(ra)) LOAD(t0, 0(src), .Ll_exc\@) ADD src, src, NBYTES SUB len, len, NBYTES - STORE(t0, 0(dst), .Ls_exc_p1u\@) - .set reorder /* DADDI_WAR */ - ADD dst, dst, NBYTES + STORE(t0, 0(\dst), .Ls_exc_p1u\@) + ADD \dst, \dst, NBYTES bne rem, len, 1b - .set noreorder #ifndef CONFIG_CPU_MIPSR6 /* @@ -398,16 +355,17 @@ * more 
instruction-level parallelism. */ #define bits t2 + ADD t1, \dst, len # t1 is just past last byte of dst beqz len, .Ldone\@ - ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep LOAD(t0, 0(src), .Ll_exc\@) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) + move len, zero + move \uncopied, zero jr ra - move len, zero .Ldst_unaligned\@: /* * dst is unaligned @@ -425,19 +383,19 @@ SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 R10KCBARRIER(0(ra)) - STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) - beq len, t2, .Ldone\@ - SUB len, len, t2 - ADD dst, dst, t2 + STFIRST(t3, FIRST(0)(\dst), .Ls_exc\@) + SUB len, len, t2 + beqz len, .Ldone\@ + ADD \dst, \dst, t2 + ADD src, src, t2 beqz match, .Lboth_aligned\@ - ADD src, src, t2 .Lsrc_unaligned_dst_aligned\@: SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter PREFS( 0, 3*32(src) ) + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES beqz t0, .Lcleanup_src_unaligned\@ - and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES - PREFD( 1, 3*32(dst) ) + PREFD( 1, 3*32(\dst) ) 1: /* * Avoid consecutive LD*'s to the same register since some mips @@ -460,45 +418,39 @@ #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif - STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) - STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) - STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) - STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) - PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) - .set reorder /* DADDI_WAR */ - ADD dst, dst, 4*NBYTES + STORE(t0, UNIT(0)(\dst), .Ls_exc_p4u\@) + STORE(t1, UNIT(1)(\dst), .Ls_exc_p3u\@) + STORE(t2, UNIT(2)(\dst), .Ls_exc_p2u\@) + STORE(t3, UNIT(3)(\dst), .Ls_exc_p1u\@) + PREFD( 1, 9*32(\dst) ) # 1 is PREF_STORE (not streamed) + ADD \dst, \dst, 4*NBYTES bne len, rem, 1b - .set noreorder .Lcleanup_src_unaligned\@: + and rem, len, NBYTES-1 # rem = len % NBYTES beqz len, .Ldone\@ - and rem, len, NBYTES-1 # rem = len % NBYTES 
beq rem, len, .Lcopy_bytes\@ - nop 1: R10KCBARRIER(0(ra)) LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDREST(t0, REST(0)(src), .Ll_exc_copy\@) ADD src, src, NBYTES SUB len, len, NBYTES - STORE(t0, 0(dst), .Ls_exc_p1u\@) - .set reorder /* DADDI_WAR */ - ADD dst, dst, NBYTES + STORE(t0, 0(\dst), .Ls_exc_p1u\@) + ADD \dst, \dst, NBYTES bne len, rem, 1b - .set noreorder #endif /* !CONFIG_CPU_MIPSR6 */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ - nop .Lcopy_bytes\@: /* 0 < len < NBYTES */ R10KCBARRIER(0(ra)) #define COPY_BYTE(N) \ LOADB(t0, N(src), .Ll_exc\@); \ SUB len, len, 1; \ - beqz len, .Ldone\@; \ - STOREB(t0, N(dst), .Ls_exc_p1\@) + STOREB(t0, N(\dst), .Ls_exc_p1\@);\ + beqz len, .Ldone\@ COPY_BYTE(0) COPY_BYTE(1) @@ -510,11 +462,12 @@ #endif LOADB(t0, NBYTES-2(src), .Ll_exc\@) SUB len, len, 1 + STOREB(t0, NBYTES-2(\dst), .Ls_exc_p1\@) + move \uncopied, len jr ra - STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: + move \uncopied, len jr ra - nop #ifdef CONFIG_CPU_MIPSR6 .Lcopy_unaligned_bytes\@: @@ -528,14 +481,12 @@ COPY_BYTE(6) COPY_BYTE(7) ADD src, src, 8 + ADD \dst, \dst, 8 b 1b - ADD dst, dst, 8 #endif /* CONFIG_CPU_MIPSR6 */ - .if __memcpy == 1 - END(memcpy) - .set __memcpy, 0 - .hidden __memcpy - .endif + + /* memcpy shouldn't generate exceptions */ + .if \mode != MEMCPY_MODE .Ll_exc_copy\@: /* @@ -552,24 +503,19 @@ 1: LOADB(t1, 0(src), .Ll_exc\@) ADD src, src, 1 - sb t1, 0(dst) # can't fault -- we're copy_from_user - .set reorder /* DADDI_WAR */ - ADD dst, dst, 1 + sb t1, 0(\dst) # can't fault -- we're copy_from_user + ADD \dst, \dst, 1 bne src, t0, 1b - .set noreorder .Ll_exc\@: LOADK t0, THREAD_BUADDR($28) # t0 is just past last good address - nop - SUB len, AT, t0 # len number of uncopied bytes + SUB len, a3, t0 # len number of uncopied bytes + move \uncopied, len jr ra - nop #define SEXC(n) \ - .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ - ADD len, len, n*NBYTES; \ + ADD \uncopied, len, n*NBYTES; \ jr ra; \ - .set noreorder SEXC(8) SEXC(7) 
@@ -581,81 +527,57 @@ SEXC(2) SEXC(1) .Ls_exc_p1\@: - .set reorder /* DADDI_WAR */ - ADD len, len, 1 + ADD \uncopied, len, 1 jr ra - .set noreorder .Ls_exc\@: + move \uncopied, len jr ra - nop - .endm - - .align 5 -LEAF(memmove) -EXPORT_SYMBOL(memmove) - ADD t0, a0, a2 - ADD t1, a1, a2 - sltu t0, a1, t0 # dst + len <= src -> memcpy - sltu t1, a0, t1 # dst >= src + len -> memcpy - and t0, t1 - beqz t0, .L__memcpy - move v0, a0 /* return value */ - beqz a2, .Lr_out - END(memmove) - - /* fall through to __rmemcpy */ -LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ - sltu t0, a1, a0 - beqz t0, .Lr_end_bytes_up # src >= dst - nop - ADD a0, a2 # dst = dst + len - ADD a1, a2 # src = src + len - -.Lr_end_bytes: - R10KCBARRIER(0(ra)) - lb t0, -1(a1) - SUB a2, a2, 0x1 - sb t0, -1(a0) - SUB a1, a1, 0x1 - .set reorder /* DADDI_WAR */ - SUB a0, a0, 0x1 - bnez a2, .Lr_end_bytes - .set noreorder - -.Lr_out: - jr ra - move a2, zero - -.Lr_end_bytes_up: - R10KCBARRIER(0(ra)) - lb t0, (a1) - SUB a2, a2, 0x1 - sb t0, (a0) - ADD a1, a1, 0x1 - .set reorder /* DADDI_WAR */ - ADD a0, a0, 0x1 - bnez a2, .Lr_end_bytes_up - .set noreorder - jr ra - move a2, zero - END(__rmemcpy) + .endif /* \mode != MEMCPY_MODE */ + .endm /* - * A combined memcpy/__copy_user - * __copy_user sets len to 0 for success; else to an upper bound of - * the number of uncopied bytes. - * memcpy sets v0 to dst. + * memcpy() - Copy memory + * @a0 - destination + * @a1 - source + * @a2 - length + * + * Copy @a2 bytes of memory from @a1 to @a0. 
+ * + * Returns: the destination pointer */ .align 5 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) - move v0, dst /* return value */ +#if _MIPS_SIM == _MIPS_SIM_PABI32 + move ta3, a0 +# define dst ta3 +#else + move v0, a0 +# define dst a0 +#endif .L__memcpy: -FEXPORT(__copy_user) + /* Legacy Mode, user <-> user */ + __BUILD_COPY_USER MEMCPY_MODE USEROP USEROP dst len +#undef dst + END(memcpy) + +/* + * __copy_user() - Copy memory + * @a0 - destination + * @a1 - source + * @a2 - length + * + * Copy @a2 bytes of memory from @a1 to @a0. + * + * Returns: the number of uncopied bytes in @a2 + */ + .align 5 +LEAF(__copy_user) EXPORT_SYMBOL(__copy_user) /* Legacy Mode, user <-> user */ - __BUILD_COPY_USER LEGACY_MODE USEROP USEROP + __BUILD_COPY_USER LEGACY_MODE USEROP USEROP a0 v0 + END(__copy_user) #ifdef CONFIG_EVA @@ -672,7 +594,7 @@ EXPORT_SYMBOL(__copy_user) LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) - __BUILD_COPY_USER EVA_MODE USEROP KERNELOP + __BUILD_COPY_USER EVA_MODE USEROP KERNELOP a0 v0 END(__copy_from_user_eva) @@ -683,7 +605,7 @@ END(__copy_from_user_eva) LEAF(__copy_to_user_eva) EXPORT_SYMBOL(__copy_to_user_eva) -__BUILD_COPY_USER EVA_MODE KERNELOP USEROP +__BUILD_COPY_USER EVA_MODE KERNELOP USEROP a0 v0 END(__copy_to_user_eva) /* @@ -692,7 +614,7 @@ END(__copy_to_user_eva) LEAF(__copy_in_user_eva) EXPORT_SYMBOL(__copy_in_user_eva) -__BUILD_COPY_USER EVA_MODE USEROP USEROP +__BUILD_COPY_USER EVA_MODE USEROP USEROP a0 v0 END(__copy_in_user_eva) #endif diff --git a/arch/mips/lib/memmove.c b/arch/mips/lib/memmove.c new file mode 100644 index 000000000000..94b7c54d1a48 --- /dev/null +++ b/arch/mips/lib/memmove.c @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at 
your + * option) any later version. + */ + +#include +#include + +void *memmove(void *dest, const void *src, size_t count) +{ + const char *s = src; + const char *s_end = s + count; + char *d = dest; + char *d_end = dest + count; + + /* Use optimised memcpy when there's no overlap */ + if ((d_end <= s) || (s_end <= d)) + return memcpy(dest, src, count); + + if (d <= s) { + /* Incrementing copy loop */ + while (count--) + *d++ = *s++; + } else { + /* Decrementing copy loop */ + d = d_end; + s = s_end; + while (count--) + *--d = *--s; + } + + return dest; +} +EXPORT_SYMBOL(memmove); diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 34062b5a95e1..4bd8becfa12b 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -60,13 +60,13 @@ EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) -#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) +#if STORSIZE <= 8 EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) #endif -#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) +#if STORSIZE <= 4 EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) @@ -78,7 +78,6 @@ #endif .endm - .set noreorder .align 5 /* @@ -112,7 +111,7 @@ .set at #endif -#ifndef CONFIG_CPU_MIPSR6 +#if !defined(__mips_isa_rev) || (__mips_isa_rev < 6) R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ @@ -122,7 +121,7 @@ PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ -#else /* CONFIG_CPU_MIPSR6 */ +#else /* __mips_isa_rev >= 6 */ #define STORE_BYTE(N) \ 
EX(sb, a1, N(a0), .Lbyte_fixup\@); \ beqz t0, 0f; \ @@ -132,7 +131,7 @@ PTR_ADDU t0, 1 STORE_BYTE(0) STORE_BYTE(1) -#if LONGSIZE == 4 +#if STORSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) @@ -145,19 +144,17 @@ ori a0, STORMASK xori a0, STORMASK PTR_ADDIU a0, STORSIZE -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* __mips_isa_rev >= 6 */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f - beqz t1, .Lmemset_partial\@ /* no block to fill */ andi t0, a2, 0x40-STORSIZE + beqz t1, .Lmemset_partial\@ /* no block to fill */ PTR_ADDU t1, a0 /* end address */ - .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode bne t1, a0, 1b - .set noreorder .Lmemset_partial\@: R10KCBARRIER(0(ra)) @@ -173,19 +170,15 @@ PTR_SUBU t1, AT .set at #endif - jr t1 PTR_ADDU a0, t0 /* dest ptr */ + jr t1 - .set push - .set noreorder - .set nomacro /* ... but first do longs ... */ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode -2: .set pop - andi a2, STORMASK /* At most one long to go */ +2: andi a2, STORMASK /* At most one long to go */ beqz a2, 1f -#ifndef CONFIG_CPU_MIPSR6 +#if __mips_isa_rev < 6 PTR_ADDU a0, a2 /* What's left */ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ @@ -193,12 +186,12 @@ #else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif -#else +#else /* __mips_isa_rev >= 6 */ PTR_SUBU t0, $0, a2 PTR_ADDIU t0, 1 STORE_BYTE(0) STORE_BYTE(1) -#if LONGSIZE == 4 +#if STORSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) @@ -208,9 +201,9 @@ EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif 0: -#endif -1: jr ra - move a2, zero +#endif /* __mips_isa_rev >= 6 */ +1: move v0, zero + jr ra .Lsmall_memset\@: beqz a2, 2f @@ -218,45 +211,45 @@ 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) - bne t1, a0, 1b sb a1, -1(a0) + bne t1, a0, 1b -2: jr ra /* done */ - move a2, zero +2: move v0, zero + jr ra /* done */ .if __memset == 1 END(memset) .set __memset, 0 .hidden __memset .endif -#ifdef CONFIG_CPU_MIPSR6 +#if 
__mips_isa_rev >= 6 .Lbyte_fixup\@: - PTR_SUBU a2, $0, t0 + PTR_SUBU v0, $0, t0 + PTR_ADDIU v0, 1 jr ra - PTR_ADDIU a2, 1 -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* __mips_isa_rev >= 6 */ .Lfirst_fixup\@: jr ra - nop .Lfwd_fixup\@: andi a2, 0x3f LONG_L t0, THREAD_BUADDR($28) LONG_ADDU a2, t1 + LONG_SUBU v0, a2, t0 jr ra - LONG_SUBU a2, t0 .Lpartial_fixup\@: andi a2, STORMASK LONG_L t0, THREAD_BUADDR($28) LONG_ADDU a2, t1 + LONG_SUBU v0, a2, t0 jr ra - LONG_SUBU a2, t0 .Llast_fixup\@: - jr ra + move v0, a2 andi v1, a2, STORMASK + jr ra .endm @@ -270,8 +263,8 @@ LEAF(memset) EXPORT_SYMBOL(memset) - beqz a1, 1f move v0, a0 /* result */ + beqz a1, 1f andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index acdff66bd5d2..882813f9818c 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -31,21 +31,21 @@ .macro __BUILD_STRNCPY_ASM func LEAF(__strncpy_from_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a1 - bnez v0, .Lfault\@ + LONG_L t0, TI_ADDR_LIMIT($28) # pointer ok? + and t0, a1 + bnez t0, .Lfault\@ move t0, zero - move v1, a1 + move t3, a1 .ifeqs "\func","kernel" -1: EX(lbu, v0, (v1), .Lfault\@) +1: EX(lbu, t2, (t3), .Lfault\@) .else -1: EX(lbue, v0, (v1), .Lfault\@) +1: EX(lbue, t2, (t3), .Lfault\@) .endif - PTR_ADDIU v1, 1 + PTR_ADDIU t3, 1 R10KCBARRIER(0(ra)) - sb v0, (a0) - beqz v0, 2f + sb t2, (a0) + beqz t2, 2f PTR_ADDIU t0, 1 PTR_ADDIU a0, 1 bne t0, a2, 1b diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index e1bacf5a3abe..d380358b3cc2 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -28,34 +28,32 @@ */ .macro __BUILD_STRNLEN_ASM func LEAF(__strnlen_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a0 - bnez v0, .Lfault\@ + LONG_L t0, TI_ADDR_LIMIT($28) # pointer ok? 
+ and t0, a0 + bnez t0, .Lfault\@ - move v0, a0 - PTR_ADDU a1, a0 # stop pointer + move t0, a0 + PTR_ADDU t1, a1, a0 # stop pointer + beq t0, t1, 2f # limit reached? 1: #ifdef CONFIG_CPU_DADDI_WORKAROUNDS .set noat li AT, 1 #endif - beq v0, a1, 1f # limit reached? .ifeqs "\func", "kernel" - EX(lb, t0, (v0), .Lfault\@) + EX(lb, t2, (t0), .Lfault\@) .else - EX(lbe, t0, (v0), .Lfault\@) + EX(lbe, t2, (t0), .Lfault\@) .endif - .set noreorder - bnez t0, 1b -1: #ifndef CONFIG_CPU_DADDI_WORKAROUNDS - PTR_ADDIU v0, 1 + PTR_ADDIU t0, 1 #else - PTR_ADDU v0, AT + PTR_ADDU t0, AT .set at #endif - .set reorder - PTR_SUBU v0, a0 + beq t0, t1, 2f # limit reached? + bnez t2, 1b +2: PTR_SUBU v0, t0, a0 jr ra END(__strnlen_\func\()_asm) diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c index 09d5deea747f..03a6bf0ab0c9 100644 --- a/arch/mips/lib/uncached.c +++ b/arch/mips/lib/uncached.c @@ -68,10 +68,10 @@ unsigned long run_uncached(void *func) } __asm__ __volatile__ ( - " move $16, $sp\n" + " move $s0, $sp\n" " move $sp, %1\n" " jalr %2\n" - " move $sp, $16" + " move $sp, $s0" : "=r" (ret) : "r" (usp), "r" (ufunc) : "$16", "$31"); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index da6c1c0c30c1..3e409b149e8b 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -2826,6 +2826,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, prevepc = xcp->cp0_epc; if (get_isa16_mode(prevepc) && cpu_has_mmips) { + WARN_ON(cpu_has_nanomips); + /* * Get next 2 microMIPS instructions and convert them * into 32-bit instructions. 
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 5450f4d1c920..12e17537be49 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -225,6 +225,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, if (isa16) { union mips_instruction insn = { .word = ir }; + WARN_ON(cpu_has_nanomips); + /* NOP16 aka MOVE16 $0, $0 */ if ((ir >> 16) == MM_NOP16) return -1; diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 555ee7ef73a1..6af82e1db914 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -4,8 +4,8 @@ # obj-y += cache.o dma-default.o extable.o fault.o \ - gup.o init.o mmap.o page.o page-funcs.o \ - pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o + gup.o init.o mmap.o pgtable.o tlbex.o \ + tlbex-fault.o tlb-funcs.o ifdef CONFIG_CPU_MICROMIPS obj-y += uasm-micromips.o @@ -13,6 +13,11 @@ else obj-y += uasm-mips.o endif +ifneq ($(CONFIG_CPU_NANOMIPS),y) +obj-y += page.o +obj-y += page-funcs.o +endif + obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 10f26cbe72c3..9d3d39a9b96d 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1101,7 +1101,7 @@ static inline void rm7k_erratum31(void) __asm__ __volatile__ ( ".set push\n\t" ".set noreorder\n\t" - ".set mips3\n\t" + ".set " MIPS_ISA_LEVEL "\n\t" "cache\t%1, 0(%0)\n\t" "cache\t%1, 0x1000(%0)\n\t" "cache\t%1, 0x2000(%0)\n\t" @@ -1505,6 +1505,7 @@ static void probe_pcache(void) case CPU_QEMU_GENERIC: case CPU_P6600: case CPU_M6250: + case CPU_I7200: if (!(read_c0_config7() & MIPS_CONF7_IAR) && (c->icache.waysize > PAGE_SIZE)) c->icache.flags |= MIPS_CACHE_ALIASES; @@ -1761,9 +1762,7 @@ static void setup_scache(void) return; default: - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { + if (cpu_has_mips_r) { 
#ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; @@ -1911,8 +1910,6 @@ static void r4k_cache_error_setup(void) void r4k_cache_init(void) { - extern void build_clear_page(void); - extern void build_copy_page(void); struct cpuinfo_mips *c = ¤t_cpu_data; probe_pcache(); diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S index 45dff5cd4b8e..7953fba69c26 100644 --- a/arch/mips/mm/cex-gen.S +++ b/arch/mips/mm/cex-gen.S @@ -20,7 +20,6 @@ LEAF(except_vec2_generic) .set noreorder .set noat - .set mips0 /* * This is a very bad place to be. Our cache error * detection has triggered. If we have write-back data diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 548acb7f8557..4234e826aed1 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -193,9 +193,7 @@ static inline int __init mips_sc_probe(void) return mips_sc_probe_cm3(); /* Ignore anything but MIPSxx processors */ - if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) + if (!cpu_has_mips_r) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? 
*/ diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S index a5427c6e9757..e8eef74e7855 100644 --- a/arch/mips/mm/tlb-funcs.S +++ b/arch/mips/mm/tlb-funcs.S @@ -12,10 +12,164 @@ * Copyright (C) 2012 Ralf Baechle */ #include +#include +#include #include +#include #define FASTPATH_SIZE 128 +.set noat + +#if defined(CONFIG_CPU_NANOMIPS) + +#define CP0_KSCRATCH $31 +#define KSCRATCH_PGD 2 +#define KSCRATCH_TLBEX_TMP 3 + +EXPORT(tlbmiss_handler_setup_pgd_start) +LEAF(tlbmiss_handler_setup_pgd) + .set push + .set at +#ifndef CONFIG_MIPS_PGD_C0_CONTEXT + la t0, pgd_current +#ifdef CONFIG_SMP + mfc0 t1, $SMP_CPUID_REG + srl t1, t1, SMP_CPUID_PTRSHIFT + addu t0, t0, t1 +#endif + sw a0, 0(t0) +#endif + mtc0 a0, CP0_KSCRATCH, KSCRATCH_PGD + jr ra + .set pop +END(tlbmiss_handler_setup_pgd) +EXPORT(tlbmiss_handler_setup_pgd_end) + +.macro tlbchange_head + mtc0 $1, CP0_KSCRATCH, KSCRATCH_TLBEX_TMP + mfc0 k0, CP0_KSCRATCH, KSCRATCH_PGD + mfc0 k1, CP0_BADVADDR + srl $1, k1, _PGDIR_SHIFT + lsa k0, $1, k0, _PGD_T_LOG2 + lw k0, 0(k0) +#if 0 + srl k1, k1, _PAGE_SHIFT - _PTE_T_LOG2 + andi k1, k1, (_PTRS_PER_PTE - 1) << _PTE_T_LOG2 + addu k0, k0, k1 +#else + ext k1, k1, _PAGE_SHIFT, _PGDIR_SHIFT - _PAGE_SHIFT + lsa k0, k1, k0, _PTE_T_LOG2 +#endif +#ifdef CONFIG_SMP +0: ll k1, 0(k0) +#else + lw k1, 0(k0) +#endif + tlbp +.endm + +.macro tlbchange_tail +#ifdef CONFIG_SMP + sc k1, 0(k0) + beqz k1, 0b +#else + sw k1, 0(k0) +#endif + ori k0, k0, _PTE_T_SIZE + + lw $1, -_PTE_T_SIZE(k0) + srl $1, $1, __PAGE_NO_EXEC_SHIFT + rotr $1, $1, __PAGE_GLOBAL_SHIFT - __PAGE_NO_EXEC_SHIFT + mtc0 $1, CP0_ENTRYLO0 + + lw k1, 0(k0) + srl k1, k1, __PAGE_NO_EXEC_SHIFT + rotr k1, k1, __PAGE_GLOBAL_SHIFT - __PAGE_NO_EXEC_SHIFT + mtc0 k1, CP0_ENTRYLO1 + + ehb + tlbwi + + mfc0 $1, CP0_KSCRATCH, KSCRATCH_TLBEX_TMP + eret +.endm + +LEAF(handle_tlbm) + tlbchange_head + + andi $1, k1, __PAGE_WRITE + beqz $1, 1f + + ori k1, k1, __PAGE_ACCESSED | __PAGE_MODIFIED | __PAGE_VALID | __PAGE_DIRTY + + 
tlbchange_tail +1: + mfc0 $1, CP0_KSCRATCH, KSCRATCH_TLBEX_TMP + j tlb_do_page_fault_1 +END(handle_tlbm) +EXPORT(handle_tlbm_end) + +LEAF(handle_tlbs) + tlbchange_head + + andi $1, k1, __PAGE_PRESENT | __PAGE_WRITE + xori $1, $1, __PAGE_PRESENT | __PAGE_WRITE + bnez $1, 1f + + ori k1, k1, __PAGE_ACCESSED | __PAGE_MODIFIED | __PAGE_VALID | __PAGE_DIRTY + + tlbchange_tail +1: + mfc0 $1, CP0_KSCRATCH, KSCRATCH_TLBEX_TMP + j tlb_do_page_fault_1 +END(handle_tlbs) +EXPORT(handle_tlbs_end) + +LEAF(handle_tlbl) + tlbchange_head + + andi $1, k1, __PAGE_PRESENT + beqz $1, 1f + + ori k1, k1, __PAGE_ACCESSED | __PAGE_VALID + + tlbchange_tail +1: + mfc0 $1, CP0_KSCRATCH, KSCRATCH_TLBEX_TMP + j tlb_do_page_fault_0 +END(handle_tlbl) +EXPORT(handle_tlbl_end) + +LEAF(handle_tlb_refill) + mfc0 k0, CP0_KSCRATCH, KSCRATCH_PGD + + mfc0 k1, CP0_BADVADDR + srl k1, k1, _PGDIR_SHIFT + lsa k0, k1, k0, _PGD_T_LOG2 + lw k0, 0(k0) + + mfc0 k1, CP0_BADVADDR + ext k1, k1, _PAGE_SHIFT + 1, _PGDIR_SHIFT - _PAGE_SHIFT - 1 + lsa k1, k1, k0, _PTE_T_LOG2 + 1 + lw k0, 0(k1) + lw k1, _PTE_T_SIZE(k1) + + srl k0, k0, __PAGE_NO_EXEC_SHIFT + rotr k0, k0, __PAGE_GLOBAL_SHIFT - __PAGE_NO_EXEC_SHIFT + mtc0 k0, CP0_ENTRYLO0 + + srl k1, k1, __PAGE_NO_EXEC_SHIFT + rotr k1, k1, __PAGE_GLOBAL_SHIFT - __PAGE_NO_EXEC_SHIFT + mtc0 k1, CP0_ENTRYLO1 + + ehb + tlbwr + eret +END(handle_tlb_refill) + +#else /* !CONFIG_TLBEX_STATIC */ + EXPORT(tlbmiss_handler_setup_pgd_start) LEAF(tlbmiss_handler_setup_pgd) 1: j 1b /* Dummy, will be replaced. 
*/ @@ -37,3 +191,5 @@ LEAF(handle_tlbl) .space FASTPATH_SIZE * 4 END(handle_tlbl) EXPORT(handle_tlbl_end) + +#endif /* !CONFIG_TLBEX_STATIC */ diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 79b9f2ad3ff5..92f8327af7b5 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1297,6 +1297,14 @@ static void build_r4000_tlb_refill_handler(void) struct mips_huge_tlb_info htlb_info __maybe_unused; enum vmalloc64_mode vmalloc_mode __maybe_unused; + if (IS_ENABLED(CONFIG_CPU_NANOMIPS)) { + extern u16 handle_tlb_refill[]; + void *ptr = (void *)((ulong)handle_tlb_refill & ~0x1ul); + memcpy((void *)ebase, ptr, 0x100); + local_flush_icache_range(ebase, ebase + 0x100); + return; + } + memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -2608,6 +2616,13 @@ void build_tlb_refill_handler(void) check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); #endif + /* + * Results in the compiler discarding the r3000 cases. Unsure why + * __get_cpu_type doesn't always result in that by itself... 
+ */ + if (cpu_has_mips_r6) + goto is_r4k; + switch (current_cpu_type()) { case CPU_R2000: case CPU_R3000: @@ -2639,10 +2654,11 @@ void build_tlb_refill_handler(void) break; default: +is_r4k: if (cpu_has_ldpte) setup_pw(); - if (!run_once) { + if (!run_once && !IS_ENABLED(CONFIG_CPU_NANOMIPS)) { scratch_reg = allocate_kscratch(); build_setup_pgd(); build_r4000_tlb_load_handler(); @@ -2653,13 +2669,13 @@ void build_tlb_refill_handler(void) else if (!cpu_has_local_ebase) build_r4000_tlb_refill_handler(); flush_tlb_handlers(); - run_once++; } - if (cpu_has_local_ebase) + if (!run_once || cpu_has_local_ebase) build_r4000_tlb_refill_handler(); if (cpu_has_xpa) config_xpa_params(); if (cpu_has_htw) config_htw_params(); + run_once = 1; } } diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 9bb6baa45da3..24e5b0d06899 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -19,7 +19,6 @@ #include #include #include -#define UASM_ISA _UASM_ISA_MICROMIPS #include #define RS_MASK 0x1f diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 9fea6c6bbf49..60ceb93c71a0 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -19,7 +19,6 @@ #include #include #include -#define UASM_ISA _UASM_ISA_CLASSIC #include #define RS_MASK 0x1f diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 806fb798091f..a3f3224a6a41 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -12,6 +12,7 @@ struct stackframe { unsigned long sp; + unsigned long fp; unsigned long pc; unsigned long ra; }; @@ -155,6 +156,7 @@ static inline void do_kernel_backtrace(unsigned long low_addr, while (depth-- && frame->pc) { frame->pc = unwind_stack_by_address(low_addr, &(frame->sp), + &(frame->fp), frame->pc, &(frame->ra)); oprofile_add_trace(frame->ra); @@ -165,6 +167,7 @@ static inline void do_kernel_backtrace(unsigned long low_addr, void notrace op_mips_backtrace(struct 
pt_regs *const regs, unsigned int depth) { struct stackframe frame = { .sp = regs->regs[29], + .fp = regs->regs[30], .pc = regs->cp0_epc, .ra = regs->regs[31] }; const int userspace = user_mode(regs); diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 477c463c89ec..2e27c0959b8b 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -12,7 +12,7 @@ ccflags-vdso := \ cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ - -DDISABLE_BRANCH_PROFILING \ + -DDISABLE_BRANCH_PROFILING -mabicalls \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) aflags-vdso := $(ccflags-vdso) \ @@ -36,6 +36,12 @@ ifndef CONFIG_CPU_MIPSR6 endif endif +ifdef CONFIG_CPU_NANOMIPS + $(warning VDSO largely disabled for nanoMIPS) + obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y)) + ccflags-vdso += -DDISABLE_MIPS_VDSO +endif + # VDSO linker flags. VDSO_LDFLAGS := \ -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c index 530a36f465ce..4f9eae70972b 100644 --- a/arch/mips/vdso/genvdso.c +++ b/arch/mips/vdso/genvdso.c @@ -45,13 +45,20 @@ #ifndef SHT_MIPS_ABIFLAGS # define SHT_MIPS_ABIFLAGS 0x7000002a #endif +#ifndef EM_NANOMIPS +# define EM_NANOMIPS 249 +#endif +#ifndef EF_MIPS_ABI +# define EF_MIPS_ABI 0xf000 +#endif enum { ABI_O32 = (1 << 0), ABI_N32 = (1 << 1), ABI_N64 = (1 << 2), + ABI_P32 = (1 << 3), - ABI_ALL = ABI_O32 | ABI_N32 | ABI_N64, + ABI_ALL = ABI_O32 | ABI_N32 | ABI_N64 | ABI_P32, }; /* Symbols the kernel requires offsets for. 
*/ @@ -168,7 +175,8 @@ static void *map_vdso(const char *path, size_t *_size) return NULL; } - if (swap_uint16(ehdr->e_machine) != EM_MIPS) { + if (swap_uint16(ehdr->e_machine) != EM_MIPS && + swap_uint16(ehdr->e_machine) != EM_NANOMIPS) { fprintf(stderr, "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", program_name, path); diff --git a/arch/mips/vdso/genvdso.h b/arch/mips/vdso/genvdso.h index 94334727059a..a1aca8ff0fe3 100644 --- a/arch/mips/vdso/genvdso.h +++ b/arch/mips/vdso/genvdso.h @@ -117,7 +117,7 @@ static inline bool FUNC(get_symbols)(const char *path, void *vdso) ELF(Shdr) *shdr; const ELF(Sym) *sym; char *strtab, *name; - uint16_t sh_count, sh_entsize, st_count, st_entsize, i, j; + uint16_t sh_count, sh_entsize, st_count, st_entsize, i, j, mach; uint64_t offset; uint32_t flags; @@ -139,8 +139,11 @@ static inline bool FUNC(get_symbols)(const char *path, void *vdso) } /* Get flags */ + mach = swap_uint16(ehdr->e_machine); flags = swap_uint32(ehdr->e_flags); - if (elf_class == ELFCLASS64) + if (mach == EM_NANOMIPS) + elf_abi = ABI_P32; + else if (elf_class == ELFCLASS64) elf_abi = ABI_N64; else if (flags & EF_MIPS_ABI2) elf_abi = ABI_N32; diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index e22b422f282c..970abd97da91 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -106,7 +106,7 @@ static __always_inline u64 read_r4k_count(void) __asm__ __volatile__( " .set push\n" - " .set mips32r2\n" + " .set " MIPS_ISA_LEVEL "\n" " rdhwr %0, $2\n" " .set pop\n" : "=r" (count)); diff --git a/arch/mips/vdso/sigreturn.S b/arch/mips/vdso/sigreturn.S index 30c6219912ac..c3e07e6cc7f0 100644 --- a/arch/mips/vdso/sigreturn.S +++ b/arch/mips/vdso/sigreturn.S @@ -21,8 +21,12 @@ LEAF(__vdso_rt_sigreturn) .cfi_signal_frame - li v0, __NR_rt_sigreturn + li $2, __NR_rt_sigreturn +#if __nanomips__ + syscall32 +#else syscall +#endif END(__vdso_rt_sigreturn) @@ -31,7 +35,7 @@ LEAF(__vdso_rt_sigreturn) 
LEAF(__vdso_sigreturn) .cfi_signal_frame - li v0, __NR_sigreturn + li $2, __NR_sigreturn syscall END(__vdso_sigreturn) diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index 0522468f488b..0e0c645b85b9 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h @@ -22,6 +22,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h index b6bdae04bc84..d05e9edfd67c 100644 --- a/arch/nios2/include/uapi/asm/unistd.h +++ b/arch/nios2/include/uapi/asm/unistd.h @@ -19,6 +19,8 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT /* Use the standard ABI for syscalls */ #include diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 9a3ee389631e..eee8e494fac0 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -22,6 +22,8 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 3d507d04eb4c..2dfa538cc307 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -143,6 +143,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 
9ba11dbcaca9..8fb678b94f3f 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -28,6 +28,7 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 7807093b73be..fab0348884af 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -15,6 +15,7 @@ #define __IGNORE_pkey_free #define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/score/include/uapi/asm/unistd.h b/arch/score/include/uapi/asm/unistd.h index 04da47bd3d46..63f35d2a1511 100644 --- a/arch/score/include/uapi/asm/unistd.h +++ b/arch/score/include/uapi/asm/unistd.h @@ -2,10 +2,12 @@ #define __ARCH_HAVE_MMU #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYSCALL_NO_AT #define __ARCH_WANT_SYSCALL_NO_FLAGS #define __ARCH_WANT_SYSCALL_OFF_T #define __ARCH_WANT_SYSCALL_DEPRECATED +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h index b36200af9ce7..75e6325c05f0 100644 --- a/arch/sh/include/asm/unistd.h +++ b/arch/sh/include/asm/unistd.h @@ -8,6 +8,7 @@ # define __ARCH_WANT_OLD_READDIR # define __ARCH_WANT_OLD_STAT # define __ARCH_WANT_STAT64 +# define __ARCH_WANT_SYSCALL_UNXSTAT # define __ARCH_WANT_SYS_ALARM # define __ARCH_WANT_SYS_GETHOSTNAME # define __ARCH_WANT_SYS_IPC diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b2a6a955113e..d90a50c6d4ee 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -23,6 +23,7 @@ #endif #define __ARCH_WANT_OLD_READDIR #define 
__ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/tile/include/uapi/asm/unistd.h b/arch/tile/include/uapi/asm/unistd.h index 1a169ec92ef8..559305db6818 100644 --- a/arch/tile/include/uapi/asm/unistd.h +++ b/arch/tile/include/uapi/asm/unistd.h @@ -14,10 +14,12 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #if !defined(__LP64__) || defined(__SYSCALL_COMPAT) /* Use the flavor of this syscall that matches the 32-bit API better. */ #define __ARCH_WANT_SYNC_FILE_RANGE2 #endif +#define __ARCH_WANT_SYSCALL_UNXSTAT /* Use the standard ABI for syscalls. */ #include diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 65856eaab163..338474c45fe8 100644 --- a/arch/unicore32/include/uapi/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -12,6 +12,8 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYSCALL_UNXSTAT /* Use the standard ABI for syscalls. 
*/ #include diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 51c4eee00732..93f6b92b9ec2 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h @@ -11,6 +11,8 @@ # define __SYSCALL_MASK (~0) # endif +#define __ARCH_WANT_SYSCALL_UNXSTAT + # ifdef CONFIG_X86_32 # include diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index ed66db3bc9bb..af9051126c0c 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -6,6 +6,7 @@ #include #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_GETPGRP diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index cd553f96e567..c757bf46d7f0 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -496,6 +496,14 @@ config PCI_ENDPOINT_TEST Enable this configuration option to enable the host side test driver for PCI Endpoint. +config MIPS_ITC + bool "MIPS ITC passthrough driver" + depends on MIPS + default y + help + Enable this to expose the Inter-Thread Communication unit provided by + some MIPS systems to userland via sysfs. + config MIPS_SAAR bool "MIPS SAAR passthrough driver" depends on MIPS @@ -504,6 +512,22 @@ config MIPS_SAAR Enable this to expose devices that are mapped using the SAAR(I) implementation-specific coprocessor 0 registers to userland via sysfs. +config MIPS_I7200_SPRAM + bool "MIPS I7200 SPRAM driver" + depends on MIPS && CPU_NANOMIPS + default y + help + Enable this to expose the scratchpad RAMs found in the MIPS I7200 to + userland. + +config MIPS_UNCACHED_MEMORY + bool "MIPS uncached memory passthrough driver" + depends on MIPS + default y + help + Enable this to expose some DDR memory with an uncached CCA to + userland via sysfs. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 7ac512e68878..8951efb2de04 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -55,7 +55,10 @@ obj-$(CONFIG_CXL_BASE) += cxl/ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o +obj-$(CONFIG_MIPS_ITC) += mips-itc.o obj-$(CONFIG_MIPS_SAAR) += mips-saar.o +obj-$(CONFIG_MIPS_I7200_SPRAM) += mips-i7200-spram.o +obj-$(CONFIG_MIPS_UNCACHED_MEMORY) += mips-uncached-memory.o lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o diff --git a/drivers/misc/mips-i7200-spram.c b/drivers/misc/mips-i7200-spram.c new file mode 100644 index 000000000000..c4caf2d349b1 --- /dev/null +++ b/drivers/misc/mips-i7200-spram.c @@ -0,0 +1,378 @@ +/* + * MIPS I7200 SRAM Driver + * + * Copyright (C) 2018 MIPS Technologies + * Author: Paul Burton + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define ERRCTL_SPR BIT(28) + +#define read_c0_sram_ctl() __read_32bit_c0_register($22, 3) +#define write_c0_sram_ctl(val) __write_32bit_c0_register($22, 3, val) +__BUILD_SET_C0(sram_ctl) + +#define SRAM_CTL_DSP_EN BIT(0) +#define SRAM_CTL_ISP_EN BIT(1) +#define SRAM_CTL_USP_EN BIT(2) +#define SRAM_CTL_DSPPB_EN BIT(4) + +#define write_c0_idatalo(val) __write_32bit_c0_register($28, 1, val) +#define write_c0_idatahi(val) __write_32bit_c0_register($29, 1, val) + +struct sram { + struct miscdevice misc; + + phys_addr_t base; + phys_addr_t size; + + unsigned int enable_bit; + + bool (*detect)(struct sram *s); +}; + +static struct sram srams[]; +static u32 sram_ctl; +static bool nodsppb; + +static unsigned long spram_get_unmapped_area(struct file *file, + unsigned long addr, 
+ unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + struct miscdevice *misc = file->private_data; + struct sram *s = container_of(misc, struct sram, misc); + unsigned long off, off_end, off_align, len_align, addr_align; + + off = pgoff << PAGE_SHIFT; + off_end = off + len; + off_align = round_up(off, s->size); + + if ((off_end <= off_align) || ((off_end - off_align) < s->size)) + goto fallback; + + len_align = len + s->size; + if ((off + len_align) < off) + goto fallback; + + addr_align = current->mm->get_unmapped_area(file, addr, len_align, + pgoff, flags); + if (!IS_ERR_VALUE(addr_align)) { + addr_align += (off - addr_align) & (s->size - 1); + return addr_align; + } + +fallback: + WARN(1, "Unable to guarantee SPRAM virtual alignment\n"); + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} + +static loff_t spram_llseek(struct file *file, loff_t offset, int orig) +{ + struct miscdevice *misc = file->private_data; + struct sram *s = container_of(misc, struct sram, misc); + + return fixed_size_llseek(file, offset, orig, s->size); +} + +static int spram_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct miscdevice *misc = file->private_data; + struct sram *s = container_of(misc, struct sram, misc); + + vma->vm_pgoff += s->base >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + pgprot_noncached(vma->vm_page_prot))) + return -EAGAIN; + + return 0; +} + +static ssize_t spram_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + struct miscdevice *misc = file->private_data; + struct sram *s = container_of(misc, struct sram, misc); + void __iomem *virt; + int err; + + virt = ioremap_uc(s->base, s->size); + err = copy_from_user(virt + *ppos, buf, size); + iounmap(virt); + if (err) + return -EFAULT; + *ppos += size; + return size; +} + +static ssize_t ispram_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) 
+{ + struct miscdevice *misc = file->private_data; + struct sram *s = container_of(misc, struct sram, misc); + unsigned long flags, off, paddr; + u32 ctl; + union { + struct { + u32 lo; + u32 hi; + }; + char ch[8]; + } data; + + for (off = *ppos; off < ((*ppos + size + 7) & ~7); off += 8, buf += 8) { + if (copy_from_user(data.ch, buf, 8)) + return -EFAULT; + + paddr = s->base + off; + + local_irq_save(flags); + ctl = read_c0_ecc(); + write_c0_ecc(ctl | ERRCTL_SPR); + back_to_back_c0_hazard(); + write_c0_idatalo(data.lo); + write_c0_idatahi(data.hi); + back_to_back_c0_hazard(); + __builtin_mips_cache(Cache_I | (0x3 << 2), (void *)paddr); + back_to_back_c0_hazard(); + write_c0_ecc(ctl); + back_to_back_c0_hazard(); + local_irq_restore(flags); + } + + mb(); + instruction_hazard(); + *ppos += size; + return size; +} + +static const struct file_operations ispram_fops = { + .owner = THIS_MODULE, + .get_unmapped_area = spram_get_unmapped_area, + .llseek = spram_llseek, + .mmap = spram_mmap, + .write = ispram_write, +}; + +static const struct file_operations duspram_fops = { + .owner = THIS_MODULE, + .get_unmapped_area = spram_get_unmapped_area, + .llseek = spram_llseek, + .mmap = spram_mmap, + .write = spram_write, +}; + +static bool uspram_detect(struct sram *s) +{ + bool have_uspram; + u32 ctl; + + /* + * Try to figure out if we have USPRAM by enabling it & seeing if the + * enable bit sticks. This is potentially disruptive if we happen to be + * using the memory at its address, but unfortunately there's no Config + * bit like there is for DSPRAM & ISPRAM... + */ + ctl = set_c0_sram_ctl(SRAM_CTL_USP_EN); + back_to_back_c0_hazard(); + have_uspram = !!(read_c0_sram_ctl() & SRAM_CTL_USP_EN); + write_c0_sram_ctl(ctl); + if (!have_uspram) + return false; + + /* + * Really... an undiscoverable & unchangeable address range that can + * differ based on core configuration... Come on hardware folk..! 
+ * + * These values are correct for the MTK_Tapeout configs as of + * changelist 4934677. + */ + s->base = 0x17800000; + s->size = SZ_256K; + + return true; +} + +static bool ispram_detect(struct sram *s) +{ + u32 ctl, tag0, tag1; + + if (!(read_c0_config() & BIT(24))) + return false; + + ctl = read_c0_ecc(); + write_c0_ecc(ctl | ERRCTL_SPR); + back_to_back_c0_hazard(); + asm volatile("cache\t%0, 0($zero)" :: "i"(Index_Load_Tag_I)); + back_to_back_c0_hazard(); + tag0 = read_c0_taglo(); + back_to_back_c0_hazard(); + asm volatile("cache\t%0, 8($zero)" :: "i"(Index_Load_Tag_I)); + back_to_back_c0_hazard(); + tag1 = read_c0_taglo(); + back_to_back_c0_hazard(); + write_c0_ecc(ctl); + back_to_back_c0_hazard(); + + s->base = tag0 & GENMASK(31, 12); + s->size = tag1 & GENMASK(19, 12); + + return !!s->size; +} + +static bool dspram_detect(struct sram *s) +{ + u32 ctl, tag0; + + if (!(read_c0_config() & BIT(23))) + return false; + + ctl = read_c0_ecc(); + write_c0_ecc(ctl | ERRCTL_SPR); + back_to_back_c0_hazard(); + asm volatile("cache\t%0, 0($zero)" :: "i"(Index_Load_Tag_D)); + back_to_back_c0_hazard(); + tag0 = read_c0_dtaglo(); + back_to_back_c0_hazard(); + write_c0_ecc(ctl); + back_to_back_c0_hazard(); + + s->base = tag0 & GENMASK(31, 12); + + /* + * The DSPRAM size tag isn't implemented... Apparently it isn't meant + * to be, and neither is the ISPRAM one or the address tags, but the + * replacement (likely registers in CDMM) isn't implemented either so + * we don't have anything better yet... Eww! + * + * For now we use the tags that are implemented despite them not being + * the approved way of discovering SPRAMs, because they're all we have. + * We presume the DSPRAM is the same size as the ISPRAM because we have + * no better data available... + * + * See SBM 84953 for details. 
+ */ + s->size = srams[1].size; + + return !!s->size; +} + +static struct sram srams[] = { + { + .misc = { + .name = "uspram", + .minor = MISC_DYNAMIC_MINOR, + .mode = S_IRUSR | S_IWUSR | S_IXUSR, + .fops = &duspram_fops, + }, + .detect = uspram_detect, + .enable_bit = SRAM_CTL_USP_EN, + }, + { + .misc = { + .name = "ispram", + .minor = MISC_DYNAMIC_MINOR, + .mode = S_IRUSR | S_IWUSR | S_IXUSR, + .fops = &ispram_fops, + }, + .detect = ispram_detect, + .enable_bit = SRAM_CTL_ISP_EN, + }, + { + .misc = { + .name = "dspram", + .minor = MISC_DYNAMIC_MINOR, + .mode = S_IRUSR | S_IWUSR, + .fops = &duspram_fops, + }, + .detect = dspram_detect, + .enable_bit = SRAM_CTL_DSP_EN, + }, +}; + +static int spram_cpu_online(unsigned int cpu) +{ + write_c0_sram_ctl(sram_ctl); + return 0; +} + +static int __init spram_init(void) +{ + int err, i; + + /* This is very I7200-specific */ + if (boot_cpu_type() != CPU_I7200) + return -ENODEV; + + sram_ctl = read_c0_sram_ctl(); + sram_ctl &= ~SRAM_CTL_DSP_EN; + sram_ctl &= ~SRAM_CTL_ISP_EN; + sram_ctl &= ~SRAM_CTL_USP_EN; + + if (nodsppb) { + pr_info("Disabling DSPPB\n"); + sram_ctl &= ~SRAM_CTL_DSPPB_EN; + } + + for (i = 0; i < ARRAY_SIZE(srams); i++) { + pr_info("%cSPRAM:", toupper(srams[i].misc.name[0])); + + if (!srams[i].detect(&srams[i])) { + pr_cont(" None\n"); + continue; + } + + if (memblock_is_memory(srams[i].base) || + memblock_is_memory(srams[i].base + srams[i].size - 1)) { + pr_cont(" Overlaps DDR, Ignoring\n"); + continue; + } + + pr_cont(" %uKB @ %pa\n", + (unsigned int)(srams[i].size / SZ_1K), + &srams[i].base); + + err = misc_register(&srams[i].misc); + if (err) { + pr_err("Failed to register %cSPRAM device: %d\n", + toupper(srams[i].misc.name[0]), err); + continue; + } + + sram_ctl |= srams[i].enable_bit; + } + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "misc/mips-i7200-spram:online", + spram_cpu_online, NULL); + if (err < 0) + return err; + + return 0; +} +device_initcall(spram_init); + +static int __init 
parse_nodsppb(char *arg) +{ + nodsppb = true; + return 0; +} +early_param("nodsppb", parse_nodsppb); diff --git a/drivers/misc/mips-itc.c b/drivers/misc/mips-itc.c new file mode 100644 index 000000000000..c3ec7680ff98 --- /dev/null +++ b/drivers/misc/mips-itc.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2018 MIPS Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include + +#include +#include +#include + +#define ERRCTL_ITC BIT(26) + +#define GEN_ITC_ACCESSORS(off, name) \ +static inline uint32_t read_itc_##name(void) \ +{ \ + uint32_t val, ecc = read_c0_ecc(); \ + write_c0_ecc(ecc | ERRCTL_ITC); \ + back_to_back_c0_hazard(); \ + asm volatile("cache\t%0, %1($zero)" \ + : /* no outputs */ \ + : "i"(Index_Load_Tag_D), \ + "i"(off)); \ + back_to_back_c0_hazard(); \ + val = read_c0_dtaglo(); \ + write_c0_ecc(ecc); \ + back_to_back_c0_hazard(); \ + return val; \ +} \ + \ +static inline void write_itc_##name(uint32_t val) \ +{ \ + uint32_t ecc = read_c0_ecc(); \ + write_c0_ecc(ecc | ERRCTL_ITC); \ + write_c0_dtaglo(val); \ + back_to_back_c0_hazard(); \ + asm volatile("cache\t%0, %1($zero)" \ + : /* no outputs */ \ + : "i"(Index_Store_Tag_D), \ + "i"(off)); \ + write_c0_ecc(ecc); \ + back_to_back_c0_hazard(); \ +} + +GEN_ITC_ACCESSORS(0x0, addrmap0) +GEN_ITC_ACCESSORS(0x8, addrmap1) + +static unsigned int itc_num_cells(void) +{ + return (read_itc_addrmap1() >> 20) & 0x7ff; +} + +struct itc_device { + struct device *dev; + struct bin_attribute battr_map; + struct bin_attribute battr_cells; + phys_addr_t base_phys; + char str_cells[16]; +}; + +static uint32_t itc_addr[2]; + +static int itc_mmap(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, struct vm_area_struct *vma) +{ + 
struct device *dev = kobj_to_dev(kobj); + struct itc_device *itc = dev_get_drvdata(dev); + + vma->vm_pgoff += itc->base_phys >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + pgprot_noncached(vma->vm_page_prot))) + return -EAGAIN; + + return 0; +} + +static ssize_t itc_cells_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct itc_device *itc = dev_get_drvdata(dev); + + memcpy(buf, &itc->str_cells[pos], count); + + return count; +} + +static int itc_probe(struct platform_device *pdev) +{ + struct itc_device *itc; + struct resource *res; + uint32_t num_cells; + int err; + + num_cells = itc_num_cells(); + if (!num_cells) + return -ENODEV; + + itc = devm_kzalloc(&pdev->dev, sizeof(*itc), GFP_KERNEL); + if (!itc) + return -ENOMEM; + + itc->dev = &pdev->dev; + dev_set_drvdata(itc->dev, itc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(itc->dev, "found no memory resource\n"); + return -EINVAL; + } + + if (!devm_request_mem_region(itc->dev, res->start, + resource_size(res), pdev->name)) { + dev_err(itc->dev, "could not request region for resource\n"); + return -EBUSY; + } + + itc->base_phys = res->start; + snprintf(itc->str_cells, sizeof(itc->str_cells), + "%u", num_cells); + + sysfs_bin_attr_init(&itc->battr_map); + itc->battr_map.attr.name = "map"; + itc->battr_map.attr.mode = S_IRUSR | S_IWUSR; + itc->battr_map.mmap = itc_mmap; + itc->battr_map.size = resource_size(res); + + err = device_create_bin_file(itc->dev, &itc->battr_map); + if (err) + return err; + + sysfs_bin_attr_init(&itc->battr_cells); + itc->battr_cells.attr.name = "cells"; + itc->battr_cells.attr.mode = S_IRUSR; + itc->battr_cells.read = itc_cells_read; + itc->battr_cells.size = strlen(itc->str_cells); + + err = device_create_bin_file(itc->dev, &itc->battr_cells); + if (err) + return err; + + 
dev_info(itc->dev, "%u cells\n", num_cells); + return 0; +} + +static struct platform_driver itc_driver = { + .driver = { + .name = "mips-itc", + }, + .probe = itc_probe, +}; + +static int itc_cpu_online_cache_tags(unsigned int cpu) +{ + write_itc_addrmap1(itc_addr[1]); + write_itc_addrmap0(itc_addr[0]); + return 0; +} + +static int __init itc_register_cache_tags(void) +{ + const phys_addr_t phys_addr = 0x17000000; + struct platform_device *pdev; + struct resource res; + int err; + + itc_addr[0] = phys_addr | BIT(0); + itc_addr[1] = ~PAGE_MASK & GENMASK(16, 10); + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "misc/mips-itc:online", + itc_cpu_online_cache_tags, NULL); + if (err < 0) + return err; + + memset(&res, 0, sizeof(res)); + res.flags = IORESOURCE_MEM; + res.start = phys_addr; + res.end = phys_addr + PAGE_SIZE - 1; + + pdev = platform_device_register_resndata(NULL, itc_driver.driver.name, + 0, &res, 1, + "itc", 4); + return PTR_ERR_OR_ZERO(pdev); +} + +static int __init itc_init(void) +{ + int err; + + err = platform_driver_register(&itc_driver); + if (err) + return err; + + switch (boot_cpu_type()) { + case CPU_I7200: + return itc_register_cache_tags(); + + default: + return 0; + } +} +postcore_initcall(itc_init); diff --git a/drivers/misc/mips-uncached-memory.c b/drivers/misc/mips-uncached-memory.c new file mode 100644 index 000000000000..a5a58fed077d --- /dev/null +++ b/drivers/misc/mips-uncached-memory.c @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2018 MIPS Technologies + * Author: Matt Redfearn + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#define pr_fmt(fmt) "MIPS uncached memory: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define BOSTON_BUILD_CONFIG0 (0x34) +#define BOSTON_BUILD_CONFIG0_LLSC (BIT(25)) + +struct mum_device { + struct device *dev; + struct bin_attribute battr_map; + void *memory; +}; + +static int mum_mmap(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, struct vm_area_struct *vma) +{ + struct device *dev = kobj_to_dev(kobj); + struct mum_device *mum = dev_get_drvdata(dev); + unsigned long size = vma->vm_end - vma->vm_start; + + pr_debug("mmap %ld bytes uncached from physical %lx (%px kern, %lx userspace)\n", + size, virt_to_phys(mum->memory), mum->memory, vma->vm_start); + + vma->vm_pgoff += virt_to_phys(mum->memory) >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static int mum_probe(struct platform_device *pdev) +{ + struct mum_device *mum; + + mum = devm_kzalloc(&pdev->dev, sizeof(*mum), GFP_KERNEL); + if (!mum) + return -ENOMEM; + + mum->dev = &pdev->dev; + dev_set_drvdata(mum->dev, mum); + + /* 1 page by default */ + mum->memory = (void*)__get_free_page(GFP_KERNEL); + if (!mum->memory) + return -ENOMEM; + + /* Ensure kernel page is written back. 
*/ + preempt_disable(); + memset(mum->memory, 0, PAGE_SIZE); + blast_dcache_range((unsigned long)mum->memory, + (unsigned long)mum->memory + PAGE_SIZE); + bc_wback_inv((unsigned long)mum->memory, PAGE_SIZE); + __sync(); + preempt_enable(); + + sysfs_bin_attr_init(&mum->battr_map); + mum->battr_map.attr.name = "map"; + mum->battr_map.attr.mode = S_IRUSR | S_IWUSR; + mum->battr_map.mmap = mum_mmap; + mum->battr_map.size = PAGE_SIZE; + + return device_create_bin_file(mum->dev, &mum->battr_map); +} + +static struct platform_driver mum_driver = { + .driver = { + .name = "mips-uncached-memory", + }, + .probe = mum_probe, +}; + +static struct platform_device mum_device = { + .name = "mips-uncached-memory", +}; + +static int __init mum_init(void) +{ + struct regmap *plt_regs; + u32 reg; + int err; + + err = platform_driver_register(&mum_driver); + if (err) + return err; + + plt_regs = syscon_regmap_lookup_by_compatible("img,boston-platform-regs"); + if (IS_ERR(plt_regs)) { + /* Not Boston? */ + return 0; + } + + regmap_read(plt_regs, BOSTON_BUILD_CONFIG0, ®); + + if ((read_c0_config5() & MIPS_CONF5_ULS) && + (reg & BOSTON_BUILD_CONFIG0_LLSC)) { + pr_info("Supported on this platform\n"); + return platform_device_register(&mum_device); + } + + return 0; +} +postcore_initcall(mum_init); diff --git a/fs/stat.c b/fs/stat.c index 873785dae022..75dff63e50ac 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -293,6 +293,8 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) #endif +#ifdef __ARCH_WANT_SYSCALL_UNXSTAT + static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) { struct stat tmp; @@ -379,6 +381,8 @@ SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) return error; } +#endif /* __ARCH_WANT_SYSCALL_UNXSTAT */ + SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, char __user *, buf, int, bufsiz) { diff --git 
a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h index da09fb986459..75fe8fdb47e0 100644 --- a/include/asm-generic/audit_dir_write.h +++ b/include/asm-generic/audit_dir_write.h @@ -27,10 +27,12 @@ __NR_mknod, __NR_mkdirat, __NR_mknodat, __NR_unlinkat, -__NR_renameat, __NR_linkat, __NR_symlinkat, #endif +#ifdef __NR_renameat +__NR_renameat, +#endif #ifdef __NR_renameat2 __NR_renameat2, #endif diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index cdf904265caf..4af00654ca0b 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -8,6 +8,8 @@ * be selected by default. */ #if __BITS_PER_LONG == 32 +#ifdef __ARCH_WANT_SYSCALL_UNXSTAT #define __ARCH_WANT_STAT64 +#endif #define __ARCH_WANT_SYS_LLSEEK #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a78186d826d7..4a34c9642332 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -495,6 +495,8 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, asmlinkage long sys_msync(unsigned long start, size_t len, int flags); asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); +asmlinkage long sys_fadvise64_64_2(int fd, int advice, loff_t offset, + loff_t len); asmlinkage long sys_munmap(unsigned long addr, size_t len); asmlinkage long sys_mlock(unsigned long start, size_t len); asmlinkage long sys_munlock(unsigned long start, size_t len); @@ -894,6 +896,9 @@ asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); +asmlinkage long sys_mmap_4koff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); asmlinkage long sys_name_to_handle_at(int dfd, const 
char __user *name, struct file_handle __user *handle, int __user *mnt_id, int flag); diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 0a294e950df8..261390ff8bd0 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -204,4 +204,9 @@ static inline void uprobe_clear_state(struct mm_struct *mm) { } #endif /* !CONFIG_UPROBES */ + +#ifndef uprobe_opcode_equal +# define uprobe_opcode_equal(a, b) ((a) == (b)) +#endif + #endif /* _LINUX_UPROBES_H */ diff --git a/include/uapi/asm-generic/stat.h b/include/uapi/asm-generic/stat.h index 0d962ecd1663..855e5db9e761 100644 --- a/include/uapi/asm-generic/stat.h +++ b/include/uapi/asm-generic/stat.h @@ -2,6 +2,11 @@ #ifndef __ASM_GENERIC_STAT_H #define __ASM_GENERIC_STAT_H +#include + +/* statx deprecates the un-extended stat syscalls which use struct stat[64] */ +#ifdef __ARCH_WANT_SYSCALL_UNXSTAT + /* * Everybody gets this wrong and has to stick with it for all * eternity. Hopefully, this version gets used by new architectures @@ -70,4 +75,6 @@ struct stat64 { }; #endif +#endif /* __ARCH_WANT_SYSCALL_UNXSTAT */ + #endif /* __ASM_GENERIC_STAT_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 8b87de067bc7..56ab657de196 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee) /* fs/stat.c */ #define __NR_readlinkat 78 __SYSCALL(__NR_readlinkat, sys_readlinkat) +#ifdef __ARCH_WANT_SYSCALL_UNXSTAT #define __NR3264_fstatat 79 __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat) #define __NR3264_fstat 80 __SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat) +#endif /* __ARCH_WANT_SYSCALL_UNXSTAT */ /* fs/sync.c */ #define __NR_sync 81 @@ -465,10 +467,15 @@ __SYSCALL(__NR_uname, sys_newuname) __SYSCALL(__NR_sethostname, sys_sethostname) #define __NR_setdomainname 162 __SYSCALL(__NR_setdomainname, sys_setdomainname) + +#ifdef __ARCH_WANT_SET_GET_RLIMIT +/* getrlimit and 
setrlimit are superseded with prlimit64 */ #define __NR_getrlimit 163 __SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit) #define __NR_setrlimit 164 __SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit) +#endif + #define __NR_getrusage 165 __SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage) #define __NR_umask 166 @@ -611,7 +618,11 @@ __SC_COMP(__NR_execve, sys_execve, compat_sys_execve) __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) /* mm/fadvise.c */ #define __NR3264_fadvise64 223 +#ifdef __ARCH_WANT_SYS_FADVISE64_64_2 +__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64_2, compat_sys_fadvise64_64) +#else __SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64) +#endif /* mm/, CONFIG_MMU only */ #ifndef __ARCH_NOMMU @@ -917,8 +928,10 @@ __SYSCALL(__NR_fork, sys_ni_syscall) #define __NR_ftruncate __NR3264_ftruncate #define __NR_lseek __NR3264_lseek #define __NR_sendfile __NR3264_sendfile +#ifdef __NR3264_fstat #define __NR_newfstatat __NR3264_fstatat #define __NR_fstat __NR3264_fstat +#endif #define __NR_mmap __NR3264_mmap #define __NR_fadvise64 __NR3264_fadvise64 #ifdef __NR3264_stat @@ -933,8 +946,10 @@ __SYSCALL(__NR_fork, sys_ni_syscall) #define __NR_ftruncate64 __NR3264_ftruncate #define __NR_llseek __NR3264_lseek #define __NR_sendfile64 __NR3264_sendfile +#ifdef __NR3264_fstat #define __NR_fstatat64 __NR3264_fstatat #define __NR_fstat64 __NR3264_fstat +#endif #define __NR_mmap2 __NR3264_mmap #define __NR_fadvise64_64 __NR3264_fadvise64 #ifdef __NR3264_stat diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index 31aa10178335..3c40eb478b80 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -42,6 +42,7 @@ #define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */ #define EM_TILEGX 191 /* Tilera TILE-Gx */ #define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */ +#define EM_NANOMIPS 249 /* nanoMIPS */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ /* diff --git 
a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c658c40ee87b..003889d87ee9 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -223,7 +223,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, */ bool __weak is_swbp_insn(uprobe_opcode_t *insn) { - return *insn == UPROBE_SWBP_INSN; + return uprobe_opcode_equal(*insn, UPROBE_SWBP_INSN); } /** @@ -1711,7 +1711,12 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) int result; pagefault_disable(); +#ifdef uprobe_opcode_equal + result = copy_from_user(&opcode, (uprobe_opcode_t __user *)vaddr, + sizeof(opcode)); +#else result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); +#endif pagefault_enable(); if (likely(result == 0)) @@ -1728,7 +1733,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) if (result < 0) return result; - copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); + copy_from_page(page, vaddr, &opcode, sizeof(opcode)); put_page(page); out: /* This needs to return true for any variant of the trap insn */ diff --git a/mm/fadvise.c b/mm/fadvise.c index ec70d6e4b86d..6b761bc4a02d 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -185,3 +185,17 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice) } #endif + +#ifdef __ARCH_WANT_SYS_FADVISE64_64_2 + +/* + * Put advice before offset so it doesn't leave a register hole due to unaligned + * 64-bit arguments. 
+ */ +SYSCALL_DEFINE4(fadvise64_64_2, int, fd, int, advice, + loff_t, offset, loff_t, len) +{ + return sys_fadvise64_64(fd, offset, len, advice); +} + +#endif diff --git a/mm/mmap.c b/mm/mmap.c index 9efdc021ad22..63c93408367a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1561,6 +1561,20 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ +#ifdef __ARCH_WANT_SYS_MMAP_4KOFF +SYSCALL_DEFINE6(mmap_4koff, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, pgoff) +{ + if (pgoff & (~PAGE_MASK >> 12)) + return -EINVAL; + + return sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); +} +#endif /* __ARCH_WANT_SYS_MMAP_4KOFF */ + + /* * Some shared mappigns will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index fd5f19c988e4..0a279d99a532 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -3022,7 +3022,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) const char *name = vif->dev ? 
vif->dev->name : "none"; seq_printf(seq, - "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", + "%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", vif - mrt->vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh index ee3dfb5be6cd..eb7a28481edb 100755 --- a/scripts/checksyscalls.sh +++ b/scripts/checksyscalls.sh @@ -38,6 +38,15 @@ cat << EOF #define __IGNORE_lstat64 /* fstatat64 */ #endif +/* statx */ +#if BITS_PER_LONG == 64 +#define __IGNORE_newfstatat /* statx */ +#define __IGNORE_newfstat /* statx */ +#else +#define __IGNORE_fstatat64 /* statx */ +#define __IGNORE_fstat64 /* statx */ +#endif + /* Missing flags argument */ #define __IGNORE_renameat /* renameat2 */ @@ -179,9 +188,11 @@ cat << EOF #define __IGNORE_futimesat /* utimensat */ #define __IGNORE_getpgrp /* getpgid */ #define __IGNORE_getdents /* getdents64 */ +#define __IGNORE_getrlimit /* prlimit64 */ #define __IGNORE_pause /* sigsuspend */ #define __IGNORE_poll /* ppoll */ #define __IGNORE_select /* pselect6 */ +#define __IGNORE_setrlimit /* prlimit64 */ #define __IGNORE_epoll_wait /* epoll_pwait */ #define __IGNORE_time /* gettimeofday */ #define __IGNORE_uname /* newuname */ diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 9ee9bf7fd1a2..43db5747b78e 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -90,6 +90,11 @@ static inline int is_arm_mapping_symbol(const char *str) && (str[2] == '\0' || str[2] == '.'); } +static inline int is_nanomips_local_symbol(const char *str) +{ + return str[0] == '.' 
&& str[1] == 'L'; +} + static int check_symbol_range(const char *sym, unsigned long long addr, struct addr_range *ranges, int entries) { @@ -152,11 +157,16 @@ static int read_symbol(FILE *in, struct sym_entry *s) } else if (toupper(stype) == 'U' || - is_arm_mapping_symbol(sym)) + is_arm_mapping_symbol(sym) || + is_nanomips_local_symbol(sym)) return -1; /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */ else if (str[0] == '$') return -1; + /* exclude also nanoMIPS L0^A symbols (since v65, see DMZ2677) */ + else if (str[0] == 'L' && str[1] == '0' && + str[2] == 1 && str[3] == '\0') + return -1; /* exclude debugging symbols */ else if (stype == 'N' || stype == 'n') return -1; diff --git a/scripts/mksysmap b/scripts/mksysmap index a35acc0d0b82..086c51e20b95 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. # (At least sparc64 has __crc_ in the middle). -$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)\| L0' > $2 diff --git a/scripts/sortextable.c b/scripts/sortextable.c index 365a907f98b3..4e1b5293e082 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c @@ -51,6 +51,10 @@ #define EM_ARCV2 195 #endif +#ifndef EM_NANOMIPS +#define EM_NANOMIPS 249 +#endif + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ @@ -325,6 +329,7 @@ do_file(char const *const fname) case EM_ARM: case EM_MICROBLAZE: case EM_MIPS: + case EM_NANOMIPS: case EM_XTENSA: break; } /* end switch */ -- cgit