from richard, adjust caching Reference: /n/atom/patch/applied/pimmucache Date: Mon Jan 4 16:20:33 CET 2016 Signed-off-by: quanstro@quanstro.net --- /sys/src/9/bcm/mmu.c Mon Jan 4 16:20:07 2016 +++ /sys/src/9/bcm/mmu.c Mon Jan 4 16:20:08 2016 @@ -74,11 +74,10 @@ */ l1[L1X(PHYSDRAM)] = 0; cachedwbse(&l1[L1X(PHYSDRAM)], sizeof(PTE)); - coherence(); - cacheuwbinv(); - l2cacheuwbinv(); - mmuinvalidate(); + //cacheuwbinv(); + //l2cacheuwbinv(); + mmuinvalidateaddr(PHYSDRAM); } static void @@ -136,8 +135,10 @@ Page *page; /* do kprocs get here and if so, do they need to? */ +/*** "This is plausible, but wrong" - Charles Forsyth 1 Mar 2015 if(m->mmupid == proc->pid && !proc->newtlb) return; +***/ m->mmupid = proc->pid; /* write back dirty and invalidate l1 caches */ @@ -275,7 +276,7 @@ * on this mmu because the virtual cache is set associative * rather than direct mapped. */ - cachedwbinv(); +// cachedwbinv(); if(page->cachectl[m->machno] == PG_TXTFLUSH){ /* pio() sets PG_TXTFLUSH whenever a text pg has been written */ cacheiinv(); @@ -303,7 +304,8 @@ pte = &m->mmul1[x]; if((*pte & (Fine|Section|Coarse)) != Section) return nil; - *pte &= ~(Cached|Buffered); + *pte &= ~L1ptedramattrs; + *pte |= L1sharable; mmuinvalidateaddr(va); cachedwbinvse(pte, 4); @@ -354,3 +356,8 @@ USED(pa); } +void +kunmap(KMap *k) +{ + cachedwbinvse(k, BY2PG); +}