This patch refreshes support for ARM (arm-uboot is a prerequisite). A number of common instructions used by the kernel were added to 5[al]: SVC, WFE, WFI, ERET, CPS, CPSID, CPSIE, DMB, DSB, and ISB. Support for the link register in the grammar was added (LR) and the arm acid library was updated to better reflect register state. r15 in the Ureg structure was also changed to a union of r15 and pc, similar to link and sp. Formal support for coprocessor registers was added as well. This brings 5a a touch closer to UAL and provides some modicum of validation. This change required updating use of coprocessor registers in each of the current ARM ports: bcm, kw, omap, and teg2. Finally, a number of bugs were corrected in 5db; disassembly output is now correct. Notes: Sun Feb 23 01:37:32 EST 2014 geoff wow, lots of incompatible changes. Reference: /n/sources/patch/maybe/arm-refresh Date: Fri Jul 5 08:20:04 CES 2013 Signed-off-by: sstallion@gmail.com Reviewed-by: geoff --- /arm/include/ureg.h Fri Jul 5 08:16:57 2013 +++ /arm/include/ureg.h Fri Jul 5 08:16:55 2013 @@ -22,5 +22,8 @@ }; ulong type; /* of exception */ ulong psr; - ulong pc; /* interrupted addr */ + union { + ulong r15; + ulong pc; + }; } Ureg; --- /sys/src/9/bcm/arm.s Fri Jul 5 08:17:01 2013 +++ /sys/src/9/bcm/arm.s Fri Jul 5 08:16:58 2013 @@ -19,11 +19,11 @@ #define ISB \ MOVW $0, R0; \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEwait #define DSB \ MOVW $0, R0; \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait #define BARRIERS ISB; DSB --- /sys/src/9/bcm/l.s Fri Jul 5 08:17:04 2013 +++ /sys/src/9/bcm/l.s Fri Jul 5 08:17:02 2013 @@ -25,11 +25,11 @@ * disable the mmu and L1 caches * invalidate caches and tlb */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BIC $(CpCdcache|CpCicache|CpCpredict|CpCmmu), R1 - MCR CpSC, 0, 
R1, C(CpCONTROL), C(0), CpMainctl - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvu), CpCACHEall - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvu), CpCACHEall + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv ISB /* @@ -54,16 +54,16 @@ * set up domain access control and page table base */ MOVW $Client, R1 - MCR CpSC, 0, R1, C(CpDAC), C(0) + MCR P(CpSC), 0, R1, C(CpDAC), C(0) MOVW $PADDR(L1), R1 - MCR CpSC, 0, R1, C(CpTTB), C(0) + MCR P(CpSC), 0, R1, C(CpTTB), C(0) /* * enable caches, mmu, and high vectors */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ORR $(CpChv|CpCdcache|CpCicache|CpCmmu), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ISB /* @@ -79,7 +79,7 @@ * enable cycle counter */ MOVW $1, R1 - MCR CpSC, 0, R1, C(CpSPM), C(CpSPMperf), CpSPMctl + MCR P(CpSC), 0, R1, C(CpSPM), C(CpSPMperf), CpSPMctl /* * call main and loop forever if it returns @@ -90,19 +90,19 @@ BL _div(SB) /* hack to load _div, etc. 
*/ TEXT fsrget(SB), 1, $-4 /* data fault status */ - MRC CpSC, 0, R0, C(CpFSR), C(0), CpFSRdata + MRC P(CpSC), 0, R0, C(CpFSR), C(0), CpFSRdata RET TEXT ifsrget(SB), 1, $-4 /* instruction fault status */ - MRC CpSC, 0, R0, C(CpFSR), C(0), CpFSRinst + MRC P(CpSC), 0, R0, C(CpFSR), C(0), CpFSRinst RET TEXT farget(SB), 1, $-4 /* fault address */ - MRC CpSC, 0, R0, C(CpFAR), C(0x0) + MRC P(CpSC), 0, R0, C(CpFAR), C(0x0) RET TEXT lcycles(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpSPM), C(CpSPMperf), CpSPMcyc + MRC P(CpSC), 0, R0, C(CpSPM), C(CpSPMperf), CpSPMcyc RET TEXT splhi(SB), 1, $-4 @@ -183,7 +183,7 @@ MOVW R1, CPSR MOVW $0, R0 /* wait for interrupt */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEintr), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEintr), CpCACHEwait ISB MOVW R3, CPSR /* splx */ @@ -199,7 +199,7 @@ */ TEXT mmuinvalidate(SB), 1, $-4 MOVW $0, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS RET @@ -208,7 +208,7 @@ * invalidate tlb entry for virtual page address va, ASID 0 */ TEXT mmuinvalidateaddr(SB), 1, $-4 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse BARRIERS RET @@ -219,7 +219,7 @@ TEXT cachedwbinv(SB), 1, $-4 DSB MOVW $0, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall RET /* @@ -261,8 +261,8 @@ TEXT cacheuwbinv(SB), 1, $-4 BARRIERS MOVW $0, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall RET /* @@ -270,5 +270,5 @@ */ TEXT cacheiinv(SB), 1, $-4 MOVW $0, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall RET --- /sys/src/9/bcm/rebootcode.s Fri Jul 5 08:17:08 2013 +++ 
/sys/src/9/bcm/rebootcode.s Fri Jul 5 08:17:06 2013 @@ -25,9 +25,9 @@ BL cachesoff(SB) /* turn off mmu */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BIC $CpCmmu, R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl /* set up a tiny stack for local vars and memmove args */ MOVW R8, SP /* stack top just before kernel dest */ @@ -55,18 +55,18 @@ /* write back and invalidate caches */ BARRIERS MOVW $0, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* turn caches off */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BIC $(CpCdcache|CpCicache|CpCpredict), R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl /* invalidate stale TLBs before changing them */ BARRIERS MOVW $KZERO, R0 /* some valid virtual address */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* from here on, R0 is base of physical memory */ @@ -80,7 +80,7 @@ /* invalidate stale TLBs again */ BARRIERS - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* relocate SB and return address to PHYSDRAM addressing */ --- /sys/src/9/kw/arm.s Fri Jul 5 08:17:11 2013 +++ /sys/src/9/kw/arm.s Fri Jul 5 08:17:09 2013 @@ -31,7 +31,7 @@ #define CLZ(s, d) WORD $(0xe16f0f10 | (d) << 12 | (s)) /* count leading 0s */ #define DMB \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEdmbarr + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEdmbarr /* * data synchronisation barrier (formerly drain write buffer). 
* waits for cache flushes, eviction buffer drain, tlb flushes, @@ -41,7 +41,7 @@ */ #define DSB \ MOVW $0, R0; \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait /* * prefetch flush; zeroes R0. * arm926ej-s manual says we need to sync with l2 cache in isb, @@ -49,7 +49,7 @@ */ #define ISB \ MOVW $0, R0; \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEwait // MOVW (R0), R0; MOVW $0, R0 /* zeroes R0 */ --- /sys/src/9/kw/l.s Fri Jul 5 08:17:17 2013 +++ /sys/src/9/kw/l.s Fri Jul 5 08:17:14 2013 @@ -37,7 +37,7 @@ * switch to system permission & 32-bit addresses. */ MOVW $(CpCsystem|CpCd32|CpCi32), R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0) ISB /* @@ -46,7 +46,7 @@ /* flush caches. 926ejs manual says we have to do it iteratively. */ _dwbinv0: - MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest BNE _dwbinv0 /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS @@ -59,20 +59,20 @@ ISB /* invalidate l2 cache */ - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all ISB /* disable l2 cache. do this while l1 caches are off */ - MRC CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MRC P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf /* disabling write allocation is probably for cortex-a8 errata 460075 */ /* l2 off, no wr alloc, no streaming */ BIC $(CpTCl2ena | CpTCl2wralloc | CpTCldcstream), R1 - MCR CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MCR P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf BARRIERS /* flush caches. 926ejs manual says we have to do it iteratively. 
*/ _dwbinv1: - MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest BNE _dwbinv1 BARRIERS @@ -186,8 +186,8 @@ SUB.S $1, R5 BNE _ptudbl BARRIERS - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinvse - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinvse + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS PUTC(' ') @@ -208,9 +208,9 @@ MOVW R0, CPSR BARRIERS BL cacheuwbinv(SB) - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCwb|CpCicache|CpCdcache|CpCalign), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS PUTC('R') @@ -230,14 +230,14 @@ BARRIERS PUTC('e') MOVW $0, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* back to 29- or 26-bit addressing, mainly for SB */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCd32|CpCi32), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS /* turn the MMU off */ @@ -293,9 +293,9 @@ ORR $(PsrDirq|PsrDfiq), R5, R4 MOVW R4, CPSR /* splhi */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) ORR $(CpCdcache|CpCicache|CpCwb), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS MOVW R5, CPSR /* splx */ @@ -310,9 +310,9 @@ BL cacheuwbinv(SB) - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCdcache|CpCicache|CpCwb), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS MOVW R5, CPSR /* splx */ @@ -331,7 +331,7 @@ BARRIERS /* force outstanding stores to cache */ /* keep writing back dirty cache lines until no more exist */ _dwb: - MRC CpSC, 
0, PC, C(CpCACHE), C(CpCACHEwb), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwb), CpCACHEtest BNE _dwb /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS @@ -354,7 +354,7 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _dwbse: - MCR CpSC, 0, R2, C(CpCACHE), C(CpCACHEwb), CpCACHEse + MCR P(CpSC), 0, R2, C(CpCACHE), C(CpCACHEwb), CpCACHEse ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _dwbse @@ -372,7 +372,7 @@ BARRIERS /* force outstanding stores to cache */ /* keep writing back dirty cache lines until no more exist */ _dwbinv: - MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest BNE _dwbinv /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS @@ -396,7 +396,7 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _dwbinvse: - MCR CpSC, 0, R2, C(CpCACHE), C(CpCACHEwbi), CpCACHEse + MCR P(CpSC), 0, R2, C(CpCACHE), C(CpCACHEwbi), CpCACHEse ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _dwbinvse @@ -421,7 +421,7 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _dinvse: - MCR CpSC, 0, R2, C(CpCACHE), C(CpCACHEinvd), CpCACHEse + MCR P(CpSC), 0, R2, C(CpCACHE), C(CpCACHEinvd), CpCACHEse ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _dinvse @@ -439,13 +439,13 @@ BARRIERS /* force outstanding stores to cache */ /* keep writing back dirty cache lines until no more exist */ _uwbinv: /* D writeback+invalidate */ - MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest BNE _uwbinv /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS MOVW $0, R0 /* I invalidate */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS @@ -454,7 +454,7 @@ TEXT cacheiinv(SB), 1, $-4 /* I invalidate */ BARRIERS - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), 
C(CpCACHEinvi), CpCACHEall /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS RET @@ -462,7 +462,7 @@ TEXT cachedinv(SB), 1, $-4 /* D invalidate */ _dinv: BARRIERS - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEall /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS RET @@ -477,29 +477,29 @@ /* enable l2 cache in config coproc. reg. do this while l1 caches are off. */ TEXT l2cachecfgon(SB), 1, $-4 BARRIERS - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all BARRIERS - MRC CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MRC P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf ORR $(CpTCl2ena | CpTCl2prefdis), R1 /* l2 on, prefetch off */ - MCR CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MCR P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf BARRIERS RET /* disable l2 cache in config coproc. reg. do this while l1 caches are off. 
*/ TEXT l2cachecfgoff(SB), 1, $-4 BARRIERS - MRC CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MRC P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf BIC $CpTCl2ena, R1 - MCR CpSC, CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf + MCR P(CpSC), CpL2, R1, C(CpTESTCFG), C(CpTCl2cfg), CpTCl2conf BARRIERS - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all BARRIERS RET TEXT l2cacheuwb(SB), 1, $-4 /* L2 unified writeback */ - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all ISB RET @@ -515,7 +515,7 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _l2wbse: - MCR CpSC, CpL2, R2, C(CpTESTCFG), C(CpTCl2flush), CpTCl2seva + MCR P(CpSC), CpL2, R2, C(CpTESTCFG), C(CpTCl2flush), CpTCl2seva ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _l2wbse @@ -529,9 +529,9 @@ ORR $(PsrDirq), R3, R1 MOVW R1, CPSR - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all ISB - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all ISB MOVW R3, CPSR /* splx */ @@ -549,9 +549,9 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _l2wbinvse: - MCR CpSC, CpL2, R2, C(CpTESTCFG), C(CpTCl2flush), CpTCl2seva + MCR P(CpSC), CpL2, R2, C(CpTESTCFG), C(CpTCl2flush), CpTCl2seva ISB - MCR CpSC, CpL2, R2, C(CpTESTCFG), C(CpTCl2inv), CpTCl2seva + MCR P(CpSC), CpL2, R2, C(CpTESTCFG), C(CpTCl2inv), CpTCl2seva ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _l2wbinvse @@ -561,7 +561,7 @@ RET TEXT l2cacheuinv(SB), 1, $-4 /* L2 unified invalidate */ - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all ISB RET @@ -577,7 +577,7 @@ ADD R2, R1 BIC $(CACHELINESZ-1), R2 _l2invse: - MCR CpSC, CpL2, R2, C(CpTESTCFG), C(CpTCl2inv), CpTCl2seva + MCR P(CpSC), CpL2, R2, C(CpTESTCFG), C(CpTCl2inv), 
CpTCl2seva ADD $CACHELINESZ, R2 CMP.S R2, R1 BGT _l2invse @@ -590,75 +590,75 @@ * enable mmu, i and d caches, and high vector */ TEXT mmuenable(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) ORR $(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb|CpCsystem), R0 BIC $(CpCrom), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS RET TEXT mmudisable(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS RET TEXT mmuinvalidate(SB), 1, $-4 /* invalidate all */ MOVW $0, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS RET TEXT mmuinvalidateaddr(SB), 1, $-4 /* invalidate single entry */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse BARRIERS RET TEXT cpidget(SB), 1, $-4 /* main ID */ - MRC CpSC, 0, R0, C(CpID), C(0), CpIDid + MRC P(CpSC), 0, R0, C(CpID), C(0), CpIDid RET TEXT cpctget(SB), 1, $-4 /* cache type */ - MRC CpSC, 0, R0, C(CpID), C(0), CpIDct + MRC P(CpSC), 0, R0, C(CpID), C(0), CpIDct RET TEXT controlget(SB), 1, $-4 /* control */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) RET TEXT ttbget(SB), 1, $-4 /* translation table base */ - MRC CpSC, 0, R0, C(CpTTB), C(0) + MRC P(CpSC), 0, R0, C(CpTTB), C(0) RET TEXT ttbput(SB), 1, $-4 /* translation table base */ - MCR CpSC, 0, R0, C(CpTTB), C(0) + MCR P(CpSC), 0, R0, C(CpTTB), C(0) ISB RET TEXT dacget(SB), 1, $-4 /* domain access control */ - MRC CpSC, 0, R0, C(CpDAC), C(0) + MRC P(CpSC), 0, R0, C(CpDAC), C(0) RET TEXT dacput(SB), 1, $-4 /* domain access control */ - MCR CpSC, 0, R0, C(CpDAC), C(0) + MCR P(CpSC), 0, R0, C(CpDAC), C(0) ISB RET TEXT fsrget(SB), 1, $-4 /* fault status */ - MRC CpSC, 0, R0, 
C(CpFSR), C(0) + MRC P(CpSC), 0, R0, C(CpFSR), C(0) RET TEXT farget(SB), 1, $-4 /* fault address */ - MRC CpSC, 0, R0, C(CpFAR), C(0x0) + MRC P(CpSC), 0, R0, C(CpFAR), C(0x0) RET TEXT pidget(SB), 1, $-4 /* address translation pid */ - MRC CpSC, 0, R0, C(CpPID), C(0x0) + MRC P(CpSC), 0, R0, C(CpPID), C(0x0) RET TEXT pidput(SB), 1, $-4 /* address translation pid */ - MCR CpSC, 0, R0, C(CpPID), C(0x0) + MCR P(CpSC), 0, R0, C(CpPID), C(0x0) ISB RET @@ -763,7 +763,7 @@ MOVW R1, CPSR MOVW $0, R0 /* wait for interrupt */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEintr), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEintr), CpCACHEwait ISB MOVW R3, CPSR /* splx */ --- /sys/src/9/kw/rebootcode.s Fri Jul 5 08:17:20 2013 +++ /sys/src/9/kw/rebootcode.s Fri Jul 5 08:17:18 2013 @@ -95,9 +95,9 @@ BL cacheuwbinv(SB) - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCwb|CpCicache|CpCdcache|CpCalign), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS /* redo double map of 0, KZERO */ @@ -116,14 +116,14 @@ BARRIERS MOVW $0, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* back to 29- or 26-bit addressing, mainly for SB */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCd32|CpCi32), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS MOVW $KADDR(0x100-4), R7 /* just before this code */ @@ -136,15 +136,15 @@ RET TEXT mmudisable(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS RET TEXT mmuinvalidate(SB), 1, $-4 /* invalidate all */ MOVW $0, R0 - MCR CpSC, 0, R0, C(CpTLB), 
C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS RET @@ -155,17 +155,17 @@ MOVW R1, CPSR _uwbinv: /* D writeback+invalidate */ - MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest + MRC P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest BNE _uwbinv MOVW $0, R0 /* I invalidate */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ BARRIERS - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all BARRIERS - MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all + MCR P(CpSC), CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all BARRIERS MOVW R3, CPSR /* splx */ --- /sys/src/9/omap/arm.s Fri Jul 5 08:17:24 2013 +++ /sys/src/9/omap/arm.s Fri Jul 5 08:17:23 2013 @@ -45,10 +45,10 @@ /* flush branch-target cache; zeroes R0 (cortex) */ #define FLBTC \ MOVW $0, R0; \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtc + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtc /* flush one entry of the branch-target cache, va in R0 (cortex) */ #define FLBTSE \ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtse + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtse /* arm v7 arch defines these */ #define WFI WORD $0xe320f003 /* wait for interrupt */ --- /sys/src/9/omap/cache.v7.s Fri Jul 5 08:17:28 2013 +++ /sys/src/9/omap/cache.v7.s Fri Jul 5 08:17:26 2013 @@ -5,7 +5,7 @@ TEXT cacheiinv(SB), $-4 /* I invalidate */ MOVW $0, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */ + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */ ISB RET @@ -13,26 +13,26 @@ * set/way operators, passed a suitable set/way value in R0. 
*/ TEXT cachedwb_sw(SB), $-4 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi RET TEXT cachedwbinv_sw(SB), $-4 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi RET TEXT cachedinv_sw(SB), $-4 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi RET /* set cache size select */ TEXT setcachelvl(SB), $-4 - MCR CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0 + MCR P(CpSC), CpIDcssel, R0, C(CpID), C(CpIDidct), 0 ISB RET /* return cache sizes */ TEXT getwayssets(SB), $-4 - MRC CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0 + MRC P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDidct), 0 RET /* @@ -143,7 +143,7 @@ /* drain write buffers */ BARRIERS - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait ISB MOVW CPSR, R2 @@ -152,9 +152,9 @@ /* get cache sizes */ SLL $1, R8, R0 /* R0 = (cache - 1) << 1 */ - MCR CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0 /* set cache size select */ + MCR P(CpSC), CpIDcssel, R0, C(CpID), C(CpIDidct), 0 /* set cache size select */ ISB - MRC CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0 /* get cache sizes */ + MRC P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDidct), 0 /* get cache sizes */ /* compute # of ways and sets for this cache level */ SRA $3, R0, R5 /* R5 (ways) = R0 >> 3 */ @@ -197,7 +197,7 @@ MOVW R2, CPSR /* splx */ /* drain write buffers */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait ISB RET --- /sys/src/9/omap/l.s Fri Jul 5 08:17:36 2013 +++ /sys/src/9/omap/l.s Fri Jul 5 08:17:31 2013 @@ -41,7 +41,7 @@ /* * work around errata */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl ORR $(CpACissue1|CpACldstissue1), R1 /* fight omap35x errata 3.1.1.9 */ ORR $CpACibe, R1 /* enable cp15 
invalidate */ ORR $CpACl1pe, R1 /* enable l1 parity checking */ @@ -49,10 +49,10 @@ BIC $CpACasa, R1 /* no speculative accesses */ /* go faster with fewer restrictions */ BIC $(CpACcachenopipe|CpACcp15serial|CpACcp15waitidle|CpACcp15pipeflush), R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl ISB - MRC CpSC, 1, R1, C(CpCLD), C(CpCLDl2), CpCLDl2aux + MRC P(CpSC), 1, R1, C(CpCLD), C(CpCLDl2), CpCLDl2aux ORR $CpCl2nowralloc, R1 /* fight cortex errata 460075 */ ORR $(CpCl2ecc|CpCl2eccparity), R1 #ifdef TEDIUM @@ -60,7 +60,7 @@ * I don't know why this clobbers the system, but I'm tired * of arguing with this fussy processor. To hell with it. */ - MCR CpSC, 1, R1, C(CpCLD), C(CpCLDl2), CpCLDl2aux + MCR P(CpSC), 1, R1, C(CpCLD), C(CpCLDl2), CpCLDl2aux ISB #endif DELAY(printloops, 1) @@ -68,16 +68,16 @@ /* * disable the MMU & caches */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BIC $(CpCdcache|CpCicache|CpCmmu), R1 ORR $CpCsbo, R1 BIC $CpCsbz, R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl ISB - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl BIC $CpACl2en, R1 /* turn l2 cache off */ - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl ISB PUTC('l') @@ -162,23 +162,23 @@ /* invalidate caches */ BL cachedinv(SB) MOVW $KZERO, R0 - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall ISB - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait BARRIERS PUTC('f') /* * turn caches on */ - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl ORR $CpACl2en, R1 /* turn l2 cache on */ - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MCR P(CpSC), 0, R1, 
C(CpCONTROL), C(0), CpAuxctl BARRIERS - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl ORR $(CpCdcache|CpCicache), R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BARRIERS PUTC('r') @@ -239,7 +239,7 @@ no2unmap: BARRIERS MOVW $KZERO, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS #ifdef HIGH_SECURITY /* i.e., not GP omap */ @@ -247,7 +247,7 @@ // MOVW $HVECTORS, R0 MOVW $PADDR(L1), R0 SUB $(MACHSIZE+(2*1024)), R0 - MCR CpSC, 0, R0, C(CpVECS), C(CpVECSbase), CpVECSmon + MCR P(CpSC), 0, R0, C(CpVECS), C(CpVECSbase), CpVECSmon ISB #endif @@ -287,10 +287,10 @@ /* turn the caches off */ BL cacheuwbinv(SB) - MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BIC $(CpCicache|CpCdcache|CpCalign), R0 ORR $CpCsw, R0 /* enable SWP */ - MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS /* redo double map of PHYSDRAM, KZERO & first few MBs */ @@ -304,7 +304,7 @@ BNE _ptrdbl MOVW $PHYSDRAM, R0 - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* turn the MMU off */ @@ -354,7 +354,7 @@ ADD R0, R1 /* R1 is end address */ BIC $(CACHELINESZ-1), R0 /* cache line start */ _dwbse: - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse /* can't have a BARRIER here since it zeroes R0 */ ADD $CACHELINESZ, R0 CMP.S R0, R1 @@ -373,7 +373,7 @@ ADD R0, R1 /* R1 is end address */ BIC $(CACHELINESZ-1), R0 /* cache line start */ _dwbinvse: - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse /* can't have a BARRIER here since it zeroes R0 */ ADD $CACHELINESZ, R0 CMP.S R0, R1 @@ -381,7 +381,7 @@ _wait: /* drain write buffer */ 
BARRIERS /* drain L1 write buffer, also drains L2 eviction buffer on sheeva */ - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEwait ISB MOVW R3, CPSR /* splx */ @@ -399,7 +399,7 @@ ADD R0, R1 /* R1 is end address */ BIC $(CACHELINESZ-1), R0 /* cache line start */ _dinvse: - MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse + MCR P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse /* can't have a BARRIER here since it zeroes R0 */ ADD $CACHELINESZ, R0 CMP.S R0, R1 @@ -410,16 +410,16 @@ * enable mmu and high vectors */ TEXT mmuenable(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ORR $(CpChv|CpCmmu), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS RET TEXT mmudisable(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BIC $(CpChv|CpCmmu), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS RET @@ -433,57 +433,57 @@ BARRIERS MOVW PC, R0 /* some valid virtual address */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS MOVW R2, CPSR /* interrupts restored */ RET TEXT mmuinvalidateaddr(SB), $-4 /* invalidate single entry */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse BARRIERS RET TEXT cpidget(SB), 1, $-4 /* main ID */ - MRC CpSC, 0, R0, C(CpID), C(0), CpIDid + MRC P(CpSC), 0, R0, C(CpID), C(0), CpIDid RET TEXT cpctget(SB), 1, $-4 /* cache type */ - MRC CpSC, 0, R0, C(CpID), C(0), CpIDct + MRC P(CpSC), 0, R0, C(CpID), C(0), CpIDct RET TEXT controlget(SB), 1, $-4 /* control */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl RET TEXT ttbget(SB), 1, $-4 /* translation table 
base */ - MRC CpSC, 0, R0, C(CpTTB), C(0), CpTTB0 + MRC P(CpSC), 0, R0, C(CpTTB), C(0), CpTTB0 RET TEXT ttbput(SB), 1, $-4 /* translation table base */ - MCR CpSC, 0, R0, C(CpTTB), C(0), CpTTB0 - MCR CpSC, 0, R0, C(CpTTB), C(0), CpTTB1 /* cortex has two */ + MCR P(CpSC), 0, R0, C(CpTTB), C(0), CpTTB0 + MCR P(CpSC), 0, R0, C(CpTTB), C(0), CpTTB1 /* cortex has two */ ISB RET TEXT dacget(SB), 1, $-4 /* domain access control */ - MRC CpSC, 0, R0, C(CpDAC), C(0) + MRC P(CpSC), 0, R0, C(CpDAC), C(0) RET TEXT dacput(SB), 1, $-4 /* domain access control */ - MCR CpSC, 0, R0, C(CpDAC), C(0) + MCR P(CpSC), 0, R0, C(CpDAC), C(0) ISB RET TEXT fsrget(SB), 1, $-4 /* data fault status */ - MRC CpSC, 0, R0, C(CpFSR), C(0), CpDFSR + MRC P(CpSC), 0, R0, C(CpFSR), C(0), CpDFSR RET TEXT ifsrget(SB), 1, $-4 /* instruction fault status */ - MRC CpSC, 0, R0, C(CpFSR), C(0), CpIFSR + MRC P(CpSC), 0, R0, C(CpFSR), C(0), CpIFSR RET TEXT farget(SB), 1, $-4 /* fault address */ - MRC CpSC, 0, R0, C(CpFAR), C(0x0) + MRC P(CpSC), 0, R0, C(CpFAR), C(0x0) RET TEXT getpsr(SB), 1, $-4 @@ -491,15 +491,15 @@ RET TEXT getscr(SB), 1, $-4 - MRC CpSC, 0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr + MRC P(CpSC), 0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr RET TEXT pidget(SB), 1, $-4 /* address translation pid */ - MRC CpSC, 0, R0, C(CpPID), C(0x0) + MRC P(CpSC), 0, R0, C(CpPID), C(0x0) RET TEXT pidput(SB), 1, $-4 /* address translation pid */ - MCR CpSC, 0, R0, C(CpPID), C(0x0) + MCR P(CpSC), 0, R0, C(CpPID), C(0x0) ISB RET --- /sys/src/9/omap/rebootcode.s Fri Jul 5 08:17:40 2013 +++ /sys/src/9/omap/rebootcode.s Fri Jul 5 08:17:38 2013 @@ -24,11 +24,11 @@ BARRIERS PUTC('R') - MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MRC P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl BIC $CpACasa, R1 /* no speculative I access forwarding to mem */ /* slow down */ ORR $(CpACcachenopipe|CpACcp15serial|CpACcp15waitidle|CpACcp15pipeflush), R1 - MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MCR P(CpSC), 0, R1, 
C(CpCONTROL), C(0), CpAuxctl BARRIERS BL cachesoff(SB) @@ -62,13 +62,13 @@ /* invalidate mmu mappings */ MOVW $KZERO, R0 /* some valid virtual address */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS PUTC('o') - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCmmu|CpCdcache|CpCicache), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) /* mmu off */ + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) /* mmu off */ BARRIERS PUTC('o') @@ -136,9 +136,9 @@ BL cacheuwbinv(SB) ADD $12, SP /* paranoia */ - MRC CpSC, 0, R0, C(CpCONTROL), C(0) + MRC P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCicache|CpCdcache), R0 - MCR CpSC, 0, R0, C(CpCONTROL), C(0) /* caches off */ + MCR P(CpSC), 0, R0, C(CpCONTROL), C(0) /* caches off */ BARRIERS /* @@ -147,7 +147,7 @@ /* invalidate stale TLBs before changing them */ MOVW $KZERO, R0 /* some valid virtual address */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* redo double map of PHYSDRAM, KZERO */ @@ -172,7 +172,7 @@ BARRIERS MOVW $KZERO, R0 /* some valid virtual address */ - MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MCR P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* switch back to PHYSDRAM addressing, mainly for SB */ --- /sys/src/9/teg2/arm.s Fri Jul 5 08:17:45 2013 +++ /sys/src/9/teg2/arm.s Fri Jul 5 08:17:42 2013 @@ -45,9 +45,9 @@ #define SMC WORD $0xe1600070 /* low 4-bits are call # (trustzone) */ /* flush branch-target cache */ -#define FLBTC MTCP CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtc +#define FLBTC MTCP P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtc /* flush one entry of the branch-target cache, va in R0 (cortex) */ -#define FLBTSE MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtse +#define FLBTSE MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtse /* arm v7 arch defines these */ #define DSB 
WORD $0xf57ff04f /* data synch. barrier; last f = SY */ @@ -128,5 +128,5 @@ /* return with cpu id in r and condition codes set from "r == 0" */ #define CPUID(r) \ - MFCP CpSC, 0, r, C(CpID), C(CpIDidct), CpIDmpid; \ + MFCP P(CpSC), 0, r, C(CpID), C(CpIDidct), CpIDmpid; \ AND.S $(MAXMACH-1), r /* mask out non-cpu-id bits */ --- /sys/src/9/teg2/cache.v7.s Fri Jul 5 08:17:49 2013 +++ /sys/src/9/teg2/cache.v7.s Fri Jul 5 08:17:47 2013 @@ -5,7 +5,7 @@ TEXT cacheiinv(SB), $-4 /* I invalidate */ MOVW $0, R0 - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */ + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */ ISB RET @@ -13,26 +13,26 @@ * set/way operators, passed a suitable set/way value in R0. */ TEXT cachedwb_sw(SB), $-4 - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi RET TEXT cachedwbinv_sw(SB), $-4 - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi RET TEXT cachedinv_sw(SB), $-4 - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi RET /* set cache size select */ TEXT setcachelvl(SB), $-4 - MTCP CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0 + MTCP P(CpSC), CpIDcssel, R0, C(CpID), C(CpIDidct), 0 ISB RET /* return cache sizes */ TEXT getwayssets(SB), $-4 - MFCP CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0 + MFCP P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDidct), 0 RET /* @@ -146,9 +146,9 @@ /* get cache sizes */ SLL $1, R8, R0 /* R0 = (cache - 1) << 1 */ - MTCP CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0 /* set cache select */ + MTCP P(CpSC), CpIDcssel, R0, C(CpID), C(CpIDidct), 0 /* set cache select */ ISB - MFCP CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0 /* get cache sizes */ + MFCP P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDidct), 0 /* get cache sizes */ /* compute # of ways and sets for this cache level */ SRA $3, R0, R5 /* 
R5 (ways) = R0 >> 3 */ --- /sys/src/9/teg2/l.s Fri Jul 5 08:17:54 2013 +++ /sys/src/9/teg2/l.s Fri Jul 5 08:17:51 2013 @@ -43,7 +43,7 @@ MOVW R0, CPSR /* invalidate i-cache and branch-target cache */ - MTCP CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MTCP P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall BARRIERS /* put cpus other than 0 to sleep until cpu 0 is ready */ @@ -83,15 +83,15 @@ /* * disable my MMU & caches */ - MFCP CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl ORR $CpCsbo, R1 BIC $(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R1 - MTCP CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BARRIERS /* cortex-a9 model-specific initial configuration */ MOVW $0, R1 - MTCP CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MTCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl BARRIERS PUTC('l') @@ -177,9 +177,9 @@ MOVW $(1-1), R0 /* l1 */ SLL $1, R0 /* R0 = (cache - 1) << 1 */ - MTCP CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0 /* select l1 cache */ + MTCP P(CpSC), CpIDcssel, R0, C(CpID), C(CpIDid), 0 /* select l1 cache */ BARRIERS - MFCP CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0 /* get sets & ways */ + MFCP P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDid), 0 /* get sets & ways */ MOVW $CACHECONF, R8 /* get log2linelen into l1setsh */ @@ -218,7 +218,7 @@ * invalidate my caches before enabling */ BL cachedinv(SB) - MTCP CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MTCP P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall BARRIERS PUTC('f') @@ -231,14 +231,14 @@ /* * turn my L1 cache on; need it for tas below. 
*/ - MFCP CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl ORR $(CpCdcache|CpCicache|CpCalign|CpCpredict), R1 - MTCP CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpMainctl BARRIERS /* cortex-a9 model-specific configuration */ MOVW $CpACl1pref, R1 - MTCP CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MTCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl BARRIERS /* we're supposed to wait until l1 & l2 are on before calling smpon */ @@ -329,29 +329,29 @@ MOVW $(PHYSDRAM | TMPSTACK), SP /* stack for cache ops */ /* paranoia: turn my mmu and caches off. */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ORR $CpCsbo, R0 BIC $(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS /* cortex-a9 model-specific initial configuration */ MOVW $0, R1 - MTCP CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl + MTCP P(CpSC), 0, R1, C(CpCONTROL), C(0), CpAuxctl ISB /* invalidate my caches before enabling */ BL cachedinv(SB) - MTCP CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MTCP P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall BARRIERS /* * turn my L1 cache on; need it (and mmu) for tas below. * need branch prediction to make delay() timing right. */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ORR $(CpCdcache|CpCicache|CpCalign|CpCpredict), R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS /* enable l1 caches coherency, at minimum for ldrex/strex. 
*/ @@ -565,7 +565,7 @@ ADD R0, R1 /* R1 is end address */ BIC $(CACHELINESZ-1), R0 /* cache line start */ _dwbse: - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse ADD $CACHELINESZ, R0 CMP.S R0, R1 BGT _dwbse @@ -583,7 +583,7 @@ ADD R0, R1 /* R1 is end address */ BIC $(CACHELINESZ-1), R0 /* cache line start */ _dwbinvse: - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse ADD $CACHELINESZ, R0 CMP.S R0, R1 BGT _dwbinvse @@ -611,16 +611,16 @@ AND.S $(CACHELINESZ-1), R0, R4 BEQ stok BIC $(CACHELINESZ-1), R0, R4 /* cache line start */ - MTCP CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse + MTCP P(CpSC), 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse stok: AND.S $(CACHELINESZ-1), R1, R4 BEQ endok BIC $(CACHELINESZ-1), R1, R4 /* cache line start */ - MTCP CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse + MTCP P(CpSC), 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse endok: BIC $(CACHELINESZ-1), R0 /* cache line start */ _dinvse: - MTCP CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse + MTCP P(CpSC), 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse ADD $CACHELINESZ, R0 CMP.S R0, R1 BGT _dinvse @@ -630,16 +630,16 @@ * enable mmu and high vectors */ TEXT mmuenable(SB), 1, $-4 - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl ORR $CpCmmu, R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS RET TEXT mmudisable(SB), 1, $-4 - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BIC $CpCmmu, R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl BARRIERS RET @@ -651,30 +651,30 @@ MOVW CPSR, R2 CPSID /* interrupts off */ BARRIERS - MTCP CpSC, 0, PC, C(CpTLB), C(CpTLBinvu), CpTLBinv + MTCP P(CpSC), 0, PC, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS MOVW 
R2, CPSR /* interrupts restored */ RET TEXT mmuinvalidateaddr(SB), $-4 /* invalidate single entry */ - MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse + MTCP P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse BARRIERS RET TEXT cpidget(SB), 1, $-4 /* main ID */ - MFCP CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDid + MFCP P(CpSC), 0, R0, C(CpID), C(CpIDidct), CpIDid RET TEXT cpctget(SB), 1, $-4 /* cache type */ - MFCP CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDct + MFCP P(CpSC), 0, R0, C(CpID), C(CpIDidct), CpIDct RET TEXT controlget(SB), 1, $-4 /* system control (sctlr) */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpMainctl RET TEXT ttbget(SB), 1, $-4 /* translation table base */ - MFCP CpSC, 0, R0, C(CpTTB), C(0), CpTTB0 + MFCP P(CpSC), 0, R0, C(CpTTB), C(0), CpTTB0 RET TEXT ttbput(SB), 1, $-4 /* translation table base */ @@ -682,31 +682,31 @@ CPSID MOVW R0, R1 BARRIERS /* finish prior accesses before changing ttb */ - MTCP CpSC, 0, R1, C(CpTTB), C(0), CpTTB0 - MTCP CpSC, 0, R1, C(CpTTB), C(0), CpTTB1 /* non-secure too */ + MTCP P(CpSC), 0, R1, C(CpTTB), C(0), CpTTB0 + MTCP P(CpSC), 0, R1, C(CpTTB), C(0), CpTTB1 /* non-secure too */ MOVW $0, R0 - MTCP CpSC, 0, R0, C(CpTTB), C(0), CpTTBctl + MTCP P(CpSC), 0, R0, C(CpTTB), C(0), CpTTBctl BARRIERS MOVW R2, CPSR RET TEXT dacget(SB), 1, $-4 /* domain access control */ - MFCP CpSC, 0, R0, C(CpDAC), C(0) + MFCP P(CpSC), 0, R0, C(CpDAC), C(0) RET TEXT dacput(SB), 1, $-4 /* domain access control */ MOVW R0, R1 BARRIERS - MTCP CpSC, 0, R1, C(CpDAC), C(0) + MTCP P(CpSC), 0, R1, C(CpDAC), C(0) ISB RET TEXT fsrget(SB), 1, $-4 /* fault status */ - MFCP CpSC, 0, R0, C(CpFSR), C(0), CpDFSR + MFCP P(CpSC), 0, R0, C(CpFSR), C(0), CpDFSR RET TEXT farget(SB), 1, $-4 /* fault address */ - MFCP CpSC, 0, R0, C(CpFAR), C(0), CpDFAR + MFCP P(CpSC), 0, R0, C(CpFAR), C(0), CpDFAR RET TEXT getpsr(SB), 1, $-4 @@ -714,16 +714,16 @@ RET TEXT getscr(SB), 1, $-4 /* secure configuration */ - MFCP CpSC, 
0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr RET TEXT pidget(SB), 1, $-4 /* address translation pid */ - MFCP CpSC, 0, R0, C(CpPID), C(0x0) + MFCP P(CpSC), 0, R0, C(CpPID), C(0x0) RET TEXT pidput(SB), 1, $-4 /* address translation pid */ - MTCP CpSC, 0, R0, C(CpPID), C(0), 0 /* pid, v7a deprecated */ - MTCP CpSC, 0, R0, C(CpPID), C(0), 1 /* context id, errata 754322 */ + MTCP P(CpSC), 0, R0, C(CpPID), C(0), 0 /* pid, v7a deprecated */ + MTCP P(CpSC), 0, R0, C(CpPID), C(0), 1 /* context id, errata 754322 */ ISB RET @@ -732,25 +732,25 @@ */ TEXT getauxctl(SB), 1, $-4 /* get cortex-a9 aux. ctl. */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpAuxctl RET TEXT putauxctl(SB), 1, $-4 /* put cortex-a9 aux. ctl. */ BARRIERS - MTCP CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0), CpAuxctl BARRIERS RET TEXT getclvlid(SB), 1, $-4 - MFCP CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), CpIDclvlid + MFCP P(CpSC), CpIDcsize, R0, C(CpID), C(CpIDidct), CpIDclvlid RET TEXT getcyc(SB), 1, $-4 - MFCP CpSC, 0, R0, C(CpCLD), C(CpCLDcyc), 0 + MFCP P(CpSC), 0, R0, C(CpCLD), C(CpCLDcyc), 0 RET TEXT getdebug(SB), 1, $-4 /* get cortex-a9 debug enable register */ - MFCP CpSC, 0, R0, C(1), C(1), 1 + MFCP P(CpSC), 0, R0, C(1), C(1), 1 RET TEXT getpc(SB), 1, $-4 --- /sys/src/9/teg2/lexception.s Fri Jul 5 08:17:58 2013 +++ /sys/src/9/teg2/lexception.s Fri Jul 5 08:17:56 2013 @@ -43,7 +43,7 @@ MOVW $0, R14 /* invalidate i-cache and branch-target cache */ - MTCP CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall + MTCP P(CpSC), 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall BARRIERS BL cpureset(SB) --- /sys/src/9/teg2/rebootcode.s Fri Jul 5 08:18:03 2013 +++ /sys/src/9/teg2/rebootcode.s Fri Jul 5 08:18:01 2013 @@ -42,16 +42,16 @@ PUTC('b') /* invalidate mmu mappings */ MOVW $KZERO, R0 /* some valid virtual address */ - MTCP CpSC, 0, R0, C(CpTLB), 
C(CpTLBinvu), CpTLBinv + MTCP P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS PUTC('o') /* * turn the MMU off */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0) + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $CpCmmu, R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0) + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0) BARRIERS PUTC('o') @@ -113,9 +113,9 @@ BL cacheuwbinv(SB) ADD $12, SP /* paranoia */ - MFCP CpSC, 0, R0, C(CpCONTROL), C(0) + MFCP P(CpSC), 0, R0, C(CpCONTROL), C(0) BIC $(CpCicache|CpCdcache), R0 - MTCP CpSC, 0, R0, C(CpCONTROL), C(0) /* caches off */ + MTCP P(CpSC), 0, R0, C(CpCONTROL), C(0) /* caches off */ BARRIERS /* @@ -124,7 +124,7 @@ /* invalidate stale TLBs before changing them */ MOVW $KZERO, R0 /* some valid virtual address */ - MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MTCP P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* redo double map of PHYSDRAM, KZERO */ @@ -149,7 +149,7 @@ BARRIERS MOVW $KZERO, R0 /* some valid virtual address */ - MTCP CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv + MTCP P(CpSC), 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv BARRIERS /* switch back to PHYSDRAM addressing, mainly for SB */ --- /sys/src/cmd/5a/a.y Fri Jul 5 08:18:08 2013 +++ /sys/src/cmd/5a/a.y Thu Jan 9 05:30:31 2014 @@ -19,16 +19,18 @@ %token LTYPE6 LTYPE7 LTYPE8 LTYPE9 LTYPEA %token LTYPEB LTYPEC LTYPED LTYPEE LTYPEF %token LTYPEG LTYPEH LTYPEI LTYPEJ LTYPEK -%token LTYPEL LTYPEM LTYPEN LTYPEBX -%token LCONST LSP LSB LFP LPC -%token LTYPEX LR LREG LF LFREG LC LCREG LPSR LFCR -%token LCOND LS LAT +%token LTYPEL LTYPEM LTYPEN LTYPEO LTYPEP +%token LTYPEQ LTYPER +%token LTYPEBX +%token LCONST LSP LSB LFP LPC LLINK +%token LTYPEX LR LREG LF LFREG LP LPREG LC LCREG LPSR LFCR +%token LCOND LS LIF LLIMIT LAT %token LFCONST %token LSCONST %token LNAME LLAB LVAR -%type con expr oexpr pointer offset sreg spreg creg +%type con expr oexpr pointer offset sreg spreg preg creg %type rcon cond reglist -%type gen rel reg regreg freg shift fcon frcon +%type gen rel 
reg regreg freg shift fcon frcon iflags limit %type imm ximm name oreg ireg nireg ioreg imsr %% prog: @@ -119,7 +121,7 @@ outcode($1, Always, &nullgen, NREG, &$3); } /* - * SWI + * SVC/SWI */ | LTYPE6 cond comma gen { @@ -169,7 +171,7 @@ outcode($1, $2, &$4, $6.reg, &$6); } /* - * RET + * RET/RFE/ERET/WFE/WFI */ | LTYPEA cond comma { @@ -201,7 +203,7 @@ outcode($1, $2, &$3, NREG, &nullgen); } /* - * word + * WORD */ | LTYPEH comma ximm { @@ -227,9 +229,9 @@ outcode($1, $2, &$3, $5.reg, &nullgen); } /* - * MCR MRC + * MCR/MRC */ -| LTYPEJ cond con ',' expr ',' spreg ',' creg ',' creg oexpr +| LTYPEJ cond preg ',' expr ',' spreg ',' creg ',' creg oexpr { Gen g; @@ -271,6 +273,48 @@ { outcode($1, Always, &nullgen, NREG, &nullgen); } +/* + * CPS + */ +| LTYPEO con + { + Gen g; + + g = nullgen; + g.type = D_CONST; + g.offset = $2; + outcode($1, Always, &g, NREG, &nullgen); + } +/* + * CPSID/CPSIE + */ +| LTYPEP iflags + { + outcode($1, Always, &$2, NREG, &nullgen); + } +| LTYPEP iflags ',' con + { + Gen g; + + g = nullgen; + g.type = D_CONST; + g.offset = $4; + outcode($1, Always, &$2, NREG, &g); + } +/* + * DMB/DSB/ISB + */ +| LTYPEQ cond limit + { + outcode($1, $2, &$3, NREG, &nullgen); + } +/* + * CLZ + */ +| LTYPER cond reg ',' reg + { + outcode($1, $2, &$3, NREG, &$5); + } cond: { @@ -285,6 +329,27 @@ $$ = $1 | $2; } +iflags: + LIF + { + $$ = nullgen; + $$.type = D_CONST; + $$.offset = $1; + } + +limit: + { + $$ = nullgen; + $$.type = D_CONST; + $$.offset = L_SY; + } +| LLIMIT + { + $$ = nullgen; + $$.type = D_CONST; + $$.offset = $1; + } + comma: | ',' comma @@ -512,6 +577,10 @@ { $$ = REGPC; } +| LLINK + { + $$ = REGLINK; + } | LR '(' expr ')' { if($3 < 0 || $3 >= NREG) @@ -524,6 +593,15 @@ | LSP { $$ = REGSP; + } + +preg: + LPREG +| LP '(' expr ')' + { + if($3 < 0 || $3 >= NREG) + print("register value out of range\n"); + $$ = $3; } creg: --- /sys/src/cmd/5a/l.s Fri Jul 5 08:18:13 2013 +++ /sys/src/cmd/5a/l.s Fri Jul 5 08:18:11 2013 @@ -25,8 +25,8 @@ MOVW R3, 
(R2) MOVW R3, R1<<2(R2) - MRC.EQ 3,9,R3,C5,C6,2 - MRC 3,9,R3,C5,C6,2 + MRC.EQ P3,9,R3,C5,C6,2 + MRC P3,9,R3,C5,C6,2 MOVM.IA [R0,SP,R4], (R2) MOVM.DB.W (R0), [R6-R11] --- /sys/src/cmd/5a/lex.c Fri Jul 5 08:18:18 2013 +++ /sys/src/cmd/5a/lex.c Thu Jan 9 05:31:28 2014 @@ -168,8 +168,10 @@ "SB", LSB, D_EXTERN, "FP", LFP, D_PARAM, "PC", LPC, D_BRANCH, + "LR", LLINK, 0, "R", LR, 0, + "R0", LREG, 0, "R1", LREG, 1, "R2", LREG, 2, @@ -206,6 +208,25 @@ "F14", LFREG, 14, "F15", LFREG, 15, + "P", LP, 0, + + "P0", LPREG, 0, + "P1", LPREG, 1, + "P2", LPREG, 2, + "P3", LPREG, 3, + "P4", LPREG, 4, + "P5", LPREG, 5, + "P6", LPREG, 6, + "P7", LPREG, 7, + "P8", LPREG, 8, + "P9", LPREG, 9, + "P10", LPREG, 10, + "P11", LPREG, 11, + "P12", LPREG, 12, + "P13", LPREG, 13, + "P14", LPREG, 14, + "P15", LPREG, 15, + "C", LC, 0, "C0", LCREG, 0, @@ -267,6 +288,23 @@ ".DB", LS, C_PBIT, ".DA", LS, 0, + "AIF", LIF, I_ABIT|I_IBIT|I_FBIT, + "AI", LIF, I_ABIT|I_IBIT, + "AF", LIF, I_ABIT|I_FBIT, + "IF", LIF, I_IBIT|I_FBIT, + "A", LIF, I_ABIT, + "I", LIF, I_IBIT, + "F", LIF, I_FBIT, + + "SY", LLIMIT, L_SY, + "ST", LLIMIT, L_ST, + "ISH", LLIMIT, L_ISH, + "ISHST", LLIMIT, L_ISHST, + "NSH", LLIMIT, L_NSH, + "NSHST", LLIMIT, L_NSHST, + "OSH", LLIMIT, L_OSH, + "OSHST", LLIMIT, L_OSHST, + "@", LAT, 0, "AND", LTYPE1, AAND, @@ -303,18 +341,18 @@ "MOVW", LTYPE3, AMOVW, "MOVD", LTYPE3, AMOVD, - "MOVDF", LTYPE3, AMOVDF, + "MOVDF", LTYPE3, AMOVDF, "MOVDW", LTYPE3, AMOVDW, "MOVF", LTYPE3, AMOVF, - "MOVFD", LTYPE3, AMOVFD, - "MOVFW", LTYPE3, AMOVFW, + "MOVFD", LTYPE3, AMOVFD, + "MOVFW", LTYPE3, AMOVFW, "MOVWD", LTYPE3, AMOVWD, - "MOVWF", LTYPE3, AMOVWF, + "MOVWF", LTYPE3, AMOVWF, - "LDREX", LTYPE3, ALDREX, - "LDREXD", LTYPE3, ALDREXD, - "STREX", LTYPE9, ASTREX, - "STREXD", LTYPE9, ASTREXD, + "LDREX", LTYPE3, ALDREX, + "LDREXD", LTYPE3, ALDREXD, + "STREX", LTYPE9, ASTREX, + "STREXD", LTYPE9, ASTREXD, /* "ABSF", LTYPEI, AABSF, @@ -346,7 +384,7 @@ "B", LTYPE4, AB, "BL", LTYPE4, ABL, - "BX", LTYPEBX, ABX, + "BX", 
LTYPEBX, ABX, "BEQ", LTYPE5, ABEQ, "BNE", LTYPE5, ABNE, @@ -366,7 +404,10 @@ "BLE", LTYPE5, ABLE, "BCASE", LTYPE5, ABCASE, + "SVC", LTYPE6, ASWI, "SWI", LTYPE6, ASWI, + "WFE", LTYPEA, AWFE, + "WFI", LTYPEA, AWFI, "CMP", LTYPE7, ACMP, "TST", LTYPE7, ATST, @@ -380,6 +421,7 @@ "RET", LTYPEA, ARET, "RFE", LTYPEA, ARFE, + "ERET", LTYPEA, AERET, "TEXT", LTYPEB, ATEXT, "GLOBL", LTYPEB, AGLOBL, @@ -391,6 +433,16 @@ "MCR", LTYPEJ, 0, "MRC", LTYPEJ, 1, + + "CPS", LTYPEO, ACPS, + "CPSID", LTYPEP, ACPSID, + "CPSIE", LTYPEP, ACPSIE, + + "DMB", LTYPEQ, ADMB, + "DSB", LTYPEQ, ADSB, + "ISB", LTYPEQ, AISB, + + "CLZ", LTYPER, ACLZ, 0 }; --- /sys/src/cmd/5c/5.out.h Fri Jul 5 08:18:21 2013 +++ /sys/src/cmd/5c/5.out.h Thu Jan 9 05:29:01 2014 @@ -156,6 +156,20 @@ ALDREXD, ASTREXD, + AERET, + AWFE, + AWFI, + + ACPS, + ACPSID, + ACPSIE, + + ADMB, + ADSB, + AISB, + + ACLZ, + ALAST, }; @@ -166,6 +180,21 @@ #define C_WBIT (1<<6) #define C_FBIT (1<<7) /* psr flags-only */ #define C_UBIT (1<<7) /* up bit */ + +/* iflags */ +#define I_ABIT (1<<8) +#define I_IBIT (1<<7) +#define I_FBIT (1<<6) + +/* limit */ +#define L_SY 15 +#define L_ST 14 +#define L_ISH 11 +#define L_ISHST 10 +#define L_NSH 7 +#define L_NSHST 6 +#define L_OSH 3 +#define L_OSHST 2 /* type/name */ #define D_GOK 0 --- /sys/src/cmd/5c/enam.c Fri Jul 5 08:18:25 2013 +++ /sys/src/cmd/5c/enam.c Thu Jan 9 05:29:28 2014 @@ -100,5 +100,15 @@ "STREX", "LDREXD", "STREXD", + "ERET", + "WFE", + "WFI", + "CPS", + "CPSID", + "CPSIE", + "DMB", + "DSB", + "ISB", + "CLZ", "LAST", }; --- /sys/src/cmd/5i/arm.h Fri Jul 5 08:18:29 2013 +++ /sys/src/cmd/5i/arm.h Fri Jul 5 08:18:26 2013 @@ -143,7 +143,7 @@ }; void Ssyscall(ulong); -int armclass(long); +int armclass(ulong); void breakpoint(char*, char*); void brkchk(ulong, int); void cmd(void); --- /sys/src/cmd/5l/asm.c Fri Jul 5 08:21:29 2013 +++ /sys/src/cmd/5l/asm.c Thu Jan 9 05:32:36 2014 @@ -846,7 +846,7 @@ o1 |= p->to.reg << 12; break; - case 10: /* swi [$con] */ + case 10: /* svc/swi [$con] */ 
o1 = oprrr(p->as, p->scond); if(p->to.type != D_NONE) { aclass(&p->to); @@ -1454,6 +1454,40 @@ o2 |= (p->scond & C_SCOND) << 28 | FREGTMP<<16 | rt<<12; } break; + + case 77: /* eret/wfe/wfi */ + o1 = oprrr(p->as, p->scond); + break; + case 78: /* cps $con */ + o1 = 0xf102<<16; + o1 |= p->from.offset & 0x1f; + break; + case 79: /* cpsid/cpsie iflags[,$con] */ + o1 = 0xf1<<24; + if(p->as == ACPSID) + o1 |= 3<<18; + else /* ACPSIE */ + o1 |= 2<<18; + o1 |= p->from.offset; + if(p->to.reg != NREG){ + o1 |= 1<<17; + o1 |= p->to.offset & 0x1f; + } + break; + case 80: /* dmb/dsb/isb limit */ + o1 = 0xf57ff04<<4; + if(p->as == AISB) + o1 |= 1<<5; + else if(p->as == ADMB) + o1 |= 1<<4; + o1 |= p->from.offset & 0xf; + break; + case 81: /* clz reg,reg */ + o1 = oprrr(p->as, p->scond); + rf = p->from.reg; + rt = p->to.reg; + o1 |= rt<<12 | rf; + break; } if(debug['a'] > 1) @@ -1554,6 +1588,10 @@ case ASRL: return o | (0xd<<21) | (1<<5); case ASRA: return o | (0xd<<21) | (2<<5); case ASWI: return o | (0xf<<24); + case AERET: return o | (0x16<<20) | (3<<5) | (7<<1); + case AWFE: return o | (0x19<<21) | (0xf<<12) | (1<<1); + case AWFI: return o | (0x19<<21) | (0xf<<12) | (1<<1) | (1<<0); + case ACLZ: return o | (0x16<<20) | (0xf<<16) | (0xf<<8) | (1<<4); /* old arm 7500 fp using coproc 1 (1<<8) */ case AADDD: return o | (0xe<<24) | (0x0<<20) | (1<<8) | (1<<7); --- /sys/src/cmd/5l/optab.c Fri Jul 5 08:18:40 2013 +++ /sys/src/cmd/5l/optab.c Thu Jan 9 05:33:34 2014 @@ -253,5 +253,21 @@ { AMOVHU, C_LAUTO,C_NONE, C_REG, 73, 8, REGSP, LFROM|V4 }, { AMOVHU, C_LOREG,C_NONE, C_REG, 73, 8, 0, LFROM|V4 }, + { AERET, C_NONE, C_NONE, C_NONE, 77, 4, 0 }, + { AWFE, C_NONE, C_NONE, C_NONE, 77, 4, 0 }, + { AWFI, C_NONE, C_NONE, C_NONE, 77, 4, 0 }, + + { ACPS, C_LCON, C_NONE, C_NONE, 78, 4, 0 }, + { ACPSID, C_LCON, C_NONE, C_LCON, 79, 4, 0 }, + { ACPSID, C_LCON, C_NONE, C_NONE, 79, 4, 0 }, + { ACPSIE, C_LCON, C_NONE, C_LCON, 79, 4, 0 }, + { ACPSIE, C_LCON, C_NONE, C_NONE, 79, 4, 0 }, + + { ADMB, 
C_LCON, C_NONE, C_NONE, 80, 4, 0 }, + { ADSB, C_LCON, C_NONE, C_NONE, 80, 4, 0 }, + { AISB, C_LCON, C_NONE, C_NONE, 80, 4, 0 }, + + { ACLZ, C_REG, C_NONE, C_REG, 81, 4, 0 }, + { AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0 }, }; --- /sys/src/cmd/5l/span.c Fri Jul 5 08:18:45 2013 +++ /sys/src/cmd/5l/span.c Thu Jan 9 05:34:11 2014 @@ -755,6 +755,16 @@ case ATEXT: case ACASE: case ABCASE: + case AERET: + case AWFE: + case AWFI: + case ACPS: + case ACPSID: + case ACPSIE: + case ADMB: + case ADSB: + case AISB: + case ACLZ: break; case AADDF: oprange[AADDD] = oprange[r]; @@ -767,7 +777,7 @@ oprange[AMOVFD] = oprange[r]; oprange[AMOVDF] = oprange[r]; break; - + case ACMPF: oprange[ACMPD] = oprange[r]; break; --- /sys/src/libmach/5.c Fri Jul 5 08:18:49 2013 +++ /sys/src/libmach/5.c Fri Jul 5 08:18:47 2013 @@ -9,18 +9,16 @@ #define REGOFF(x) (ulong) (&((struct Ureg *) 0)->x) -#define SP REGOFF(r13) -#define PC REGOFF(pc) - #define REGSIZE sizeof(struct Ureg) Reglist armreglist[] = { {"TYPE", REGOFF(type), RINT|RRDONLY, 'X'}, {"PSR", REGOFF(psr), RINT|RRDONLY, 'X'}, - {"PC", PC, RINT, 'X'}, - {"SP", SP, RINT, 'X'}, - {"R15", PC, RINT, 'X'}, + {"PC", REGOFF(pc), RINT, 'X'}, + {"LR", REGOFF(link), RINT, 'X'}, + {"SP", REGOFF(sp), RINT, 'X'}, + {"R15", REGOFF(r15), RINT, 'X'}, {"R14", REGOFF(r14), RINT, 'X'}, {"R13", REGOFF(r13), RINT, 'X'}, {"R12", REGOFF(r12), RINT, 'X'}, @@ -36,10 +34,10 @@ {"R2", REGOFF(r2), RINT, 'X'}, {"R1", REGOFF(r1), RINT, 'X'}, {"R0", REGOFF(r0), RINT, 'X'}, - { 0 } + { 0 } }; - /* the machine description */ +/* the machine description */ Mach marm = { "arm", @@ -49,7 +47,7 @@ 0, /* fp register set size */ "PC", /* name of PC */ "SP", /* name of SP */ - "R14", /* name of link register */ + "LR", /* name of link register */ "setR12", /* static base register name */ 0, /* static base register value */ 0x1000, /* page size */ --- /sys/src/libmach/5db.c Fri Jul 5 08:18:54 2013 +++ /sys/src/libmach/5db.c Thu Jan 9 04:28:32 2014 @@ -11,8 +11,6 @@ #define ASR(v, s) 
((long)(v) >> (s)) #define ROR(v, s) (LSR((v), (s)) | (((v) & ((1 << (s))-1)) << (32 - (s)))) - - typedef struct Instr Instr; struct Instr { @@ -29,6 +27,7 @@ uchar rs; /* bits 0-11 (shifter operand) */ long imm; /* rotated imm */ + int conv; /* converted imm */ char* curr; /* fill point in buffer */ char* end; /* end of buffer */ char* err; /* error message */ @@ -49,7 +48,6 @@ /* * Arm-specific debugger interface */ - static char *armexcep(Map*, Rgetter); static int armfoll(Map*, uvlong, Rgetter, uvlong*); static int arminst(Map*, uvlong, char, char*, int); @@ -61,7 +59,7 @@ */ Machdata armmach = { - {0x70, 0x00, 0x20, 0xD1}, /* break point */ /* D1200070 */ + {0x70, 0x00, 0x20, 0xE1}, /* break point */ /* E1200070 */ 4, /* break point size */ leswab, /* short to local byte order */ @@ -69,7 +67,7 @@ leswav, /* long to local byte order */ risctrace, /* C traceback */ riscframe, /* Frame finder */ - armexcep, /* print exception */ + armexcep, /* print exception */ 0, /* breakpoint fixup */ 0, /* single precision float printer */ 0, /* double precision float printer */ @@ -87,22 +85,21 @@ c = (*rget)(map, "TYPE"); switch ((int)c&0x1f) { case 0x11: - return "Fiq interrupt"; + return "Fast Interrupt"; case 0x12: - return "Mirq interrupt"; + return "Interrupt"; case 0x13: - return "SVC/SWI Exception"; + return "Supervisor Call"; case 0x17: return "Prefetch Abort/Data Abort"; case 0x18: return "Data Abort"; + case 0x1a: + return "Hypervisor Call"; case 0x1b: - return "Undefined instruction/Breakpoint"; - case 0x1f: - return "Sys trap"; - default: - return "Undefined trap"; + return "Undefined Instruction"; } + return ""; } static @@ -133,14 +130,36 @@ }; int -armclass(long w) +armclass(ulong w) { int op, done, cp; op = (w >> 25) & 0x7; switch(op) { case 0: /* data processing r,r,r */ + if((w >> 28) == 0xf) { /* cps et al. 
*/ + op = (w >> 18) & 3; + switch(op) { + case 2: + op = 128; /* cpsie */ + break; + case 3: + op = 129; /* cpsid */ + break; + default: + op = 130; /* cps */ + } + break; + } op = ((w >> 4) & 0xf); + if(op == 0x1) { + op = 131; /* clz */ + break; + } + if(op == 0x6) { + op = 125; /* eret */ + break; + } if(op == 0x9) { op = 48+16; /* mul, swp or *rex */ if((w & 0x0ff00fff) == 0x01900f9f) { @@ -179,10 +198,17 @@ op += 16; break; case 1: /* data processing i,r,r */ + if ((w & 0x0ffffffe) == 0x320f002) { /* wfe, wfi */ + if (w & 1) + op = 126; /* wfi */ + else + op = 127; /* wfe */ + break; + } op = (48) + ((w >> 21) & 0xf); break; case 2: /* load/store byte/word i(r) */ - if ((w & 0xffffff8f) == 0xf57ff00f) { /* barriers, clrex */ + if ((w & 0xffffff80) == 0xf57ff000) { /* barriers, clrex */ done = 1; switch ((w >> 4) & 7) { case 1: @@ -219,7 +245,12 @@ case 5: /* branch / branch link */ op = (48+24+4+4+2) + ((w >> 24) & 0x1); break; - case 7: /* coprocessor crap */ + case 7: + if(((w >> 24) & 0xf) == 0xf){ /* svc (née swi) */ + op = 124; + break; + } + /* coprocessor crap */ cp = (w >> 8) & 0xF; if(cp == 10 || cp == 11){ /* vfp */ if((w >> 4) & 0x1){ @@ -247,14 +278,14 @@ op = 108; break; case 7: - if(((w >> 19) & 0x1) == 0) + if(((w >> 19) & 0x1) == 0){ if(((w >> 17) & 0x1) == 0) op = 109 + ((w >> 16) & 0x4) + ((w >> 15) & 0x2) + ((w >> 7) & 0x1); else if(((w >> 16) & 0x7) == 0x7) op = 117; - else + }else switch((w >> 16) & 0x7){ case 0: case 4: @@ -344,7 +375,7 @@ class == CAUTO ? " auto" : "param", offset); return 0; } - bprint(i, "%s%c%lld%s", s.name, class == CPARAM ? '+' : '-', s.value, reg); + bprint(i, "%s%c%llux%s", s.name, class == CPARAM ? 
'+' : '-', s.value, reg); return 1; } @@ -352,7 +383,7 @@ * Print value v as name[+offset] */ static int -gsymoff(char *buf, int n, ulong v, int space) +gsymoff(char *buf, int n, ulong v, int space, int addr) { Symbol s; int r; @@ -367,15 +398,13 @@ delta = -delta; } if (v == 0 || r == 0 || delta >= 4096) - return snprint(buf, n, "#%lux", v); - if (strcmp(s.name, ".string") == 0) - return snprint(buf, n, "#%lux", v); - if (!delta) - return snprint(buf, n, "%s", s.name); - if (s.type != 't' && s.type != 'T') - return snprint(buf, n, "%s+%llux", s.name, v-s.value); + return snprint(buf, n, "$%lux", v); + if (delta == 0) + return snprint(buf, n, "%s%s(SB)", + addr ? "$" : "", s.name); else - return snprint(buf, n, "#%lux", v); + return snprint(buf, n, "%s%s+%lux(SB)", + addr ? "$" : "", s.name, delta); } static void @@ -387,21 +416,21 @@ i->rs = (i->w >> 0) & 0xf; if(i->rn == 15 && i->rs == 0) { if(i->op == 8) { - format("MOVW", i,"CPSR, R%d"); + format("MOVW", i,"CPSR,R%d"); return; } else if(i->op == 10) { - format("MOVW", i,"SPSR, R%d"); + format("MOVW", i,"SPSR,R%d"); return; } } else if(i->rn == 9 && i->rd == 15) { if(i->op == 9) { - format("MOVW", i, "R%s, CPSR"); + format("MOVW", i, "R%s,CPSR"); return; } else if(i->op == 11) { - format("MOVW", i, "R%s, SPSR"); + format("MOVW", i, "R%s,SPSR"); return; } } @@ -426,7 +455,7 @@ i->rd = (i->w >> 12) & 0xf; i->rs = i->w&0x0f; - /* RET is encoded as ADD #0,R14,R15 */ + /* RET is encoded as ADD #0,R14,R15 */ if((i->w & 0x0fffffff) == 0x028ef000){ format("RET%C", i, ""); return; @@ -450,7 +479,7 @@ i->imm = v; i->rn = (i->w >> 16) & 0xf; i->rd = (i->w >> 12) & 0xf; - /* RET is encoded as LW.P x,R13,R15 */ + /* RET is encoded as LW.P x,R13,R15 */ if ((i->w & 0x0ffff000) == 0x049df000) { format("RET%C%p", i, "%I"); @@ -510,24 +539,6 @@ } static void -armund(Opcode *o, Instr *i) -{ - format(o->o, i, o->a); -} - -static void -armcdt(Opcode *o, Instr *i) -{ - format(o->o, i, o->a); -} - -static void -armunk(Opcode *o, Instr 
*i) -{ - format(o->o, i, o->a); -} - -static void armb(Opcode *o, Instr *i) { ulong v; @@ -553,14 +564,51 @@ p = (i->w >> 5) & 0x7; if(i->w&(1<<4)) { op = (i->w >> 21) & 0x07; - snprint(buf, sizeof(buf), "#%x, #%x, R%d, C(%d), C(%d), #%x", cp, op, i->rd, i->rn, i->rs, p); + snprint(buf, sizeof(buf), "P%d,%d,R%d,C%d,C%d,%d", cp, op, i->rd, i->rn, i->rs, p); } else { op = (i->w >> 20) & 0x0f; - snprint(buf, sizeof(buf), "#%x, #%x, C(%d), C(%d), C(%d), #%x", cp, op, i->rd, i->rn, i->rs, p); + snprint(buf, sizeof(buf), "P%d,%d,C%d,C%d,C%d,%d", cp, op, i->rd, i->rn, i->rs, p); + } + format(o->o, i, buf); +} + +static void +armsvc(Opcode *o, Instr *i) +{ + i->imm = i->w & 0xffffff; + format(o->o, i, o->a); +} + +static void +armcps(Opcode *o, Instr *i) +{ + char *p, *e; + char buf[12]; + + p = buf; + e = buf + sizeof buf; + if(i->w>>18 & 3) { + if(i->w & 1<<8) + p = seprint(p, e, "A"); + if(i->w & 1<<7) + p = seprint(p, e, "I"); + if(i->w & 1<<6) + p = seprint(p, e, "F"); + } + if(i->w & 1<<17) { + if(p != buf) + p = seprint(p, e, ","); + seprint(p, e, "%lux", i->w & 0x1f); } format(o->o, i, buf); } +static void +armfmt(Opcode *o, Instr *i) +{ + format(o->o, i, o->a); +} + static int armcondpass(Map *map, Rgetter rget, uchar cond) { @@ -591,7 +639,7 @@ case 10: return n == v; case 11: return n != v; case 12: return !z && (n == v); - case 13: return z && (n != v); + case 13: return z || (n != v); case 14: return 1; case 15: return 0; } @@ -819,68 +867,68 @@ "ADC%C%S", armdps, 0, "R%s,R%n,R%d", "SBC%C%S", armdps, 0, "R%s,R%n,R%d", "RSC%C%S", armdps, 0, "R%s,R%n,R%d", - "TST%C%S", armdps, 0, "R%s,R%n", - "TEQ%C%S", armdps, 0, "R%s,R%n", - "CMP%C%S", armdps, 0, "R%s,R%n", - "CMN%C%S", armdps, 0, "R%s,R%n", + "TST%C", armdps, 0, "R%s,R%n", + "TEQ%C", armdps, 0, "R%s,R%n", + "CMP%C", armdps, 0, "R%s,R%n", + "CMN%C", armdps, 0, "R%s,R%n", "ORR%C%S", armdps, 0, "R%s,R%n,R%d", "MOVW%C%S", armdps, armfmov, "R%s,R%d", "BIC%C%S", armdps, 0, "R%s,R%n,R%d", "MVN%C%S", armdps, 0, 
"R%s,R%d", /* 16 */ - "AND%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "EOR%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "SUB%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "RSB%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "ADD%C%S", armdps, armfadd, "(R%s%h%m),R%n,R%d", - "ADC%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "SBC%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "RSC%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "TST%C%S", armdps, 0, "(R%s%h%m),R%n", - "TEQ%C%S", armdps, 0, "(R%s%h%m),R%n", - "CMP%C%S", armdps, 0, "(R%s%h%m),R%n", - "CMN%C%S", armdps, 0, "(R%s%h%m),R%n", - "ORR%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "MOVW%C%S", armdps, armfmov, "(R%s%h%m),R%d", - "BIC%C%S", armdps, 0, "(R%s%h%m),R%n,R%d", - "MVN%C%S", armdps, 0, "(R%s%h%m),R%d", + "AND%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "EOR%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "SUB%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "RSB%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "ADD%C%S", armdps, armfadd, "R%s%h%m,R%n,R%d", + "ADC%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "SBC%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "RSC%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "TST%C", armdps, 0, "R%s%h%m,R%n", + "TEQ%C", armdps, 0, "R%s%h%m,R%n", + "CMP%C", armdps, 0, "R%s%h%m,R%n", + "CMN%C", armdps, 0, "R%s%h%m,R%n", + "ORR%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "MOVW%C%S", armdps, armfmov, "R%s%h%m,R%d", + "BIC%C%S", armdps, 0, "R%s%h%m,R%n,R%d", + "MVN%C%S", armdps, 0, "R%s%h%m,R%d", /* 32 */ - "AND%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "EOR%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "SUB%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "RSB%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "ADD%C%S", armdps, armfadd, "(R%s%hR%M),R%n,R%d", - "ADC%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "SBC%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "RSC%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "TST%C%S", armdps, 0, "(R%s%hR%M),R%n", - "TEQ%C%S", armdps, 0, "(R%s%hR%M),R%n", - "CMP%C%S", armdps, 0, "(R%s%hR%M),R%n", - "CMN%C%S", armdps, 0, "(R%s%hR%M),R%n", - "ORR%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - 
"MOVW%C%S", armdps, armfmov, "(R%s%hR%M),R%d", - "BIC%C%S", armdps, 0, "(R%s%hR%M),R%n,R%d", - "MVN%C%S", armdps, 0, "(R%s%hR%M),R%d", + "AND%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "EOR%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "SUB%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "RSB%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "ADD%C%S", armdps, armfadd, "R%s%hR%M,R%n,R%d", + "ADC%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "SBC%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "RSC%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "TST%C", armdps, 0, "R%s%hR%M,R%n", + "TEQ%C", armdps, 0, "R%s%hR%M,R%n", + "CMP%C", armdps, 0, "R%s%hR%M,R%n", + "CMN%C", armdps, 0, "R%s%hR%M,R%n", + "ORR%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "MOVW%C%S", armdps, armfmov, "R%s%hR%M,R%d", + "BIC%C%S", armdps, 0, "R%s%hR%M,R%n,R%d", + "MVN%C%S", armdps, 0, "R%s%hR%M,R%d", /* 48 */ - "AND%C%S", armdpi, 0, "$#%i,R%n,R%d", - "EOR%C%S", armdpi, 0, "$#%i,R%n,R%d", - "SUB%C%S", armdpi, 0, "$#%i,R%n,R%d", - "RSB%C%S", armdpi, 0, "$#%i,R%n,R%d", - "ADD%C%S", armdpi, armfadd, "$#%i,R%n,R%d", - "ADC%C%S", armdpi, 0, "$#%i,R%n,R%d", - "SBC%C%S", armdpi, 0, "$#%i,R%n,R%d", - "RSC%C%S", armdpi, 0, "$#%i,R%n,R%d", - "TST%C%S", armdpi, 0, "$#%i,R%n", - "TEQ%C%S", armdpi, 0, "$#%i,R%n", - "CMP%C%S", armdpi, 0, "$#%i,R%n", - "CMN%C%S", armdpi, 0, "$#%i,R%n", - "ORR%C%S", armdpi, 0, "$#%i,R%n,R%d", - "MOVW%C%S", armdpi, armfmov, "$#%i,R%d", - "BIC%C%S", armdpi, 0, "$#%i,R%n,R%d", - "MVN%C%S", armdpi, 0, "$#%i,R%d", + "AND%C%S", armdpi, 0, "$%i,R%n,R%d", + "EOR%C%S", armdpi, 0, "$%i,R%n,R%d", + "SUB%C%S", armdpi, 0, "$%i,R%n,R%d", + "RSB%C%S", armdpi, 0, "$%i,R%n,R%d", + "ADD%C%S", armdpi, armfadd, "$%i,R%n,R%d", + "ADC%C%S", armdpi, 0, "$%i,R%n,R%d", + "SBC%C%S", armdpi, 0, "$%i,R%n,R%d", + "RSC%C%S", armdpi, 0, "$%i,R%n,R%d", + "TST%C", armdpi, 0, "$%i,R%n", + "TEQ%C", armdpi, 0, "$%i,R%n", + "CMP%C", armdpi, 0, "$%i,R%n", + "CMN%C", armdpi, 0, "$%i,R%n", + "ORR%C%S", armdpi, 0, "$%i,R%n,R%d", + "MOVW%C%S", armdpi, armfmov, "$%i,R%d", + 
"BIC%C%S", armdpi, 0, "$%i,R%n,R%d", + "MVN%C%S", armdpi, 0, "$%i,R%d", /* 48+16 */ "MUL%C%S", armdpi, 0, "R%M,R%s,R%n", @@ -900,21 +948,21 @@ "MOVW%C%p", armsdti, armfmov, "%I,R%d", "MOVBU%C%p", armsdti, armfmov, "%I,R%d", - "MOVW%C%p", armsdts, 0, "R%d,(R%s%h%m)(R%n)", - "MOVB%C%p", armsdts, 0, "R%d,(R%s%h%m)(R%n)", - "MOVW%C%p", armsdts, armfmov, "(R%s%h%m)(R%n),R%d", - "MOVBU%C%p", armsdts, armfmov, "(R%s%h%m)(R%n),R%d", - - "MOVM%C%P%a", armbdt, armfmovm, "[%r],(R%n)", - "MOVM%C%P%a", armbdt, armfmovm, "(R%n),[%r]", - - "B%C", armb, armfbranch, "%b", - "BL%C", armb, armfbranch, "%b", - - "CDP%C", armco, 0, "", - "CDP%C", armco, 0, "", - "MCR%C", armco, 0, "", - "MRC%C", armco, 0, "", + "MOVW%C%p", armsdts, 0, "R%d,R%s%h%m(R%n)", + "MOVB%C%p", armsdts, 0, "R%d,R%s%h%m(R%n)", + "MOVW%C%p", armsdts, armfmov, "R%s%h%m(R%n),R%d", + "MOVBU%C%p", armsdts, armfmov, "R%s%h%m(R%n),R%d", + + "MOVM%C%P%a", armbdt, armfmovm, "[%r],(R%n)", + "MOVM%C%P%a", armbdt, armfmovm, "(R%n),[%r]", + + "B%C", armb, armfbranch, "%b", + "BL%C", armb, armfbranch, "%b", + + "CDP%C", armco, 0, "", + "CDP%C", armco, 0, "", + "MCR%C", armco, 0, "", + "MRC%C", armco, 0, "", /* 48+24+4+4+2+2+4 */ "MULLU%C%S", armdpi, 0, "R%M,R%s,(R%n,R%d)", @@ -923,21 +971,21 @@ "MULAL%C%S", armdpi, 0, "R%M,R%s,(R%n,R%d)", /* 48+24+4+4+2+2+4+4 = 92 */ - "UNK", armunk, 0, "", + "UNK", armfmt, 0, "", /* new v7 arch instructions */ /* 93 */ "LDREX", armdpi, 0, "(R%n),R%d", "STREX", armdpi, 0, "R%s,(R%n),R%d", - "CLREX", armunk, 0, "", + "CLREX", armfmt, 0, "", /* 96 */ - "DSB", armunk, 0, "", - "DMB", armunk, 0, "", - "ISB", armunk, 0, "", + "DSB", armfmt, 0, "%L", + "DMB", armfmt, 0, "%L", + "ISB", armfmt, 0, "%L", /* 99 */ - "RFEV7%P%a", armbdt, 0, "(R%n)", + "RFE%P%a", armbdt, 0, "(R%n)", /* 100 */ "MLA%f%C", armdps, 0, "F%s,F%n,F%d", @@ -970,16 +1018,27 @@ "MOVW%C", armdps, 0, "%x,R%d", /* 122 */ - "MOV%f%C", armvstdi, 0, "F%d,%I", - "MOV%f%C", armvstdi, 0, "%I,F%d", -}; + "MOV%f%C", armvstdi, 0, "F%d,%I", + 
"MOV%f%C", armvstdi, 0, "%I,F%d", -static void -gaddr(Instr *i) -{ - *i->curr++ = '$'; - i->curr += gsymoff(i->curr, i->end-i->curr, i->imm, CANY); -} +/* 124 */ + "SVC%C", armsvc, 0, "%i", + +/* 125 */ + "ERET%C", armfmt, 0, "", + +/* 126 */ + "WFI%C", armfmt, 0, "", + "WFE%C", armfmt, 0, "", + +/* 128 */ + "CPSIE", armcps, 0, "", + "CPSID", armcps, 0, "", + "CPS", armcps, 0, "", + +/* 131 */ + "CLZ", armdps, 0, "R%s,R%d", +}; static char *mode[] = { 0, "IA", "DB", "IB" }; static char *pw[] = { "P", "PW", 0, "W" }; @@ -1058,13 +1117,13 @@ break; } g = 0; - fmt = "#%lx(R%d)"; + fmt = "%lx(R%d)"; if (i->rn == 15) { /* convert load of offset(PC) to a load immediate */ if (get4(i->map, i->addr+i->imm+8, (ulong*)&i->imm) > 0) { + i->conv = 1; g = 1; - fmt = ""; } } if (mach->sb) @@ -1075,23 +1134,21 @@ if (get4(i->map, i->addr+4, &nxti) > 0) { if ((nxti & 0x0e0f0fff) == 0x060c000b) { i->imm += mach->sb; + i->conv = 0; g = 1; - fmt = "-SB"; } } } if (i->rn == 12) { i->imm += mach->sb; + i->conv = 0; g = 1; - fmt = "-SB(SB)"; } } if (g) - { - gaddr(i); - bprint(i, fmt, i->rn); - } + i->curr += gsymoff(i->curr, i->end-i->curr, + i->imm, CANY, i->conv); else bprint(i, fmt, i->imm, i->rn); break; @@ -1112,13 +1169,13 @@ break; case 'b': - i->curr += symoff(i->curr, i->end-i->curr, - (ulong)i->imm, CTEXT); + i->curr += gsymoff(i->curr, i->end-i->curr, + i->imm, CTEXT, 0); break; case 'g': i->curr += gsymoff(i->curr, i->end-i->curr, - i->imm, CANY); + i->imm, CANY, 0); break; case 'f': @@ -1157,6 +1214,32 @@ break; case 0xF: bprint(i, "DF"); + break; + } + break; + + case 'L': + switch(i->w & 0xf) { + case 14: + bprint(i, "ST"); + break; + case 11: + bprint(i, "ISH"); + break; + case 10: + bprint(i, "ISHST"); + break; + case 7: + bprint(i, "NSH"); + break; + case 6: + bprint(i, "NSHST"); + break; + case 3: + bprint(i, "OSH"); + break; + case 2: + bprint(i, "OSHST"); break; } break; --- /sys/lib/acid/arm Fri Jul 5 08:18:58 2013 +++ /sys/lib/acid/arm Mon Dec 23 06:45:57 2013 @@ 
-18,31 +18,36 @@ defn linkreg(addr) { - return *R14; + return *LR; } defn stk() // trace { - _stk(*PC, *SP, linkreg(0), 0); + _stk(*PC, *SP, *LR, 0); } defn lstk() // trace with locals { - _stk(*PC, *SP, linkreg(0), 1); + _stk(*PC, *SP, *LR, 1); +} + +defn spr() // print special purpose registers +{ + print("PC\t", *PC, " ", fmt(*PC, 'a'), " "); pfl(*PC); + print("PSR\t", *PSR, " TYPE\t", *TYPE, " ", reason(*TYPE), "\n"); } defn gpr() // print general purpose registers { - print("R0\t", *R0, " R1\t", *R1, " R2\t", *R2, "\n"); - print("R3\t", *R3, " R4\t", *R4, " R5\t", *R5, "\n"); - print("R6\t", *R6, " R7\t", *R7, " R8\t", *R8, "\n"); - print("R9\t", *R9, " R10\t", *R10, " R11\t", *R11, "\n"); - print("R12\t", *R12, " R13\t", *R13, " R14\t", *R14, "\n"); - print("R15\t", *R15, "\n"); + print("R0\t", *R0, " R1\t", *R1, " R2\t", *R2, " R3\t", *R3, "\n"); + print("R4\t", *R4, " R5\t", *R5, " R6\t", *R6, " R7\t", *R7, "\n"); + print("R8\t", *R8, " R9\t", *R9, " R10\t", *R10, " R11\t", *R11, "\n"); + print("R12\t", *R12, " R13\t", *R13, " R14\t", *R14, " R15\t", *R15, "\n"); } defn regs() // print all registers { + spr(); gpr(); } --- /sys/lib/acid/kernel Mon Aug 12 21:28:12 2013 +++ /sys/lib/acid/kernel Mon Aug 12 21:28:16 2013 @@ -382,6 +382,10 @@ map({"*data", KZERO, 0xffffffff, KZERO}); kdir = "alpha"; } + if objtype == "arm" then { + map({"*data", KZERO, 0xffffffff, KZERO}); + kdir = "arm"; + } needacid("proc"); } }