clean up handling of vmap 0

Reference: /n/atom/patch/applied/vmapzero
Date: Tue Jan 21 17:24:43 CET 2014
Signed-off-by: quanstro@quanstro.net

--- /sys/src/nix/k10/mmu.c	Tue Jan 21 17:24:39 2014
+++ /sys/src/nix/k10/mmu.c	Tue Jan 21 17:24:40 2014
@@ -606,6 +606,9 @@
 	pa -= o;
 	sz = ROUNDUP(size+o, PGSZ);
 
+	if(pa+sz == 0 || pa >= -KSEG2)
+		return nil;
+
 	/*
 	 * This is incomplete; the checks are not comprehensive
 	 * enough. Assume requests for low memory are already mapped.
@@ -614,7 +617,6 @@
 		return KADDR(pa+o);
 	if(pa < 1ull*MiB)
 		return nil;
-
 	/*
 	 * only adralloc the actual request. pci bars can be less than 1 page.
 	 * take it on faith that they don't overlap.
@@ -662,26 +664,29 @@
  */
 typedef struct Remap Remap;
 struct Remap {
-	uintmem	map[150];
+	uintmem	map[300];
 	usize	n;
 };
 static Remap smap;
 
 void*
-vmapoverlap(uintmem pa0, usize size)
+vmapoverlap(uintmem pa0, usize sz)
 {
 	int i, o;
 	uintmem pa, p;
 
 	o = pa0 - (pa0 & ~(PGSZ-1));
 	pa = pa0 - o;
-	size = ROUNDUP(size+o, PGSZ);
+	sz = ROUNDUP(sz+o, PGSZ);
+
+	if(pa+sz == 0 || pa >= -KSEG2)
+		return nil;
 
-	if(pa+size < 1ull*MiB)
+	if(pa+sz < 1ull*MiB)
 		return KADDR(pa);
 	if(pa < 1ull*MiB)
 		return nil;
-	for(p = pa; p < pa+size; p += PGSZ){
+	for(p = pa; p < pa+sz; p += PGSZ){
 		for(i = 0; i < smap.n; i++)
 			if(p == smap.map[i])
				break;
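
Notes. The added test rejects two cases that can reach vmap and vmapoverlap with a physical range near the top of the address space: pa+sz == 0 means the page-rounded range wrapped around 64 bits to exactly zero, and pa >= -KSEG2 means the physical address is too large to fit between KSEG2 (the base of the kernel's vmap window) and the top of the address space, so the mapping could not be expressed without the virtual address wrapping as well. The following standalone sketch shows the check in isolation; it is not part of the patch, is written as portable C rather than kernel code, and uses an assumed stand-in value for KSEG2 (the real constant lives in the kernel's mem.h and may differ).

/* sketch only: the vmap range rejection test in isolation */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t uintmem;

enum { PGSZ = 4096 };
#define KSEG2VAL 0xfffffe0000000000ull	/* assumed stand-in for the kernel's KSEG2 */

static int
vmaprejects(uintmem pa, uintmem sz)
{
	/*
	 * pa+sz == 0: the page-rounded range wrapped around the top of
	 * the 64-bit address space (e.g. pa = -PGSZ, sz = PGSZ).
	 * pa >= -KSEG2VAL: pa does not fit between KSEG2 and the top of
	 * the address space, so KSEG2+pa would itself wrap.
	 */
	return pa+sz == 0 || pa >= -KSEG2VAL;
}

int
main(void)
{
	printf("%d\n", vmaprejects(-(uintmem)PGSZ, PGSZ));	/* 1: range wraps to 0 */
	printf("%d\n", vmaprejects(0x100000000ull, PGSZ));	/* 0: ordinary device address */
	return 0;
}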