allow big fat kernels to boot.

Notes:
	Tue Nov 27 14:35:05 EST 2007 jmk
	merged with an existing local hack.

Reference: /n/sources/patch/applied/fatkernel
Date: Wed Nov 21 16:07:59 CET 2007
Signed-off-by: quanstro@quanstro.net
Reviewed-by: jmk

--- /sys/src/9/pc/l.s	Wed Nov 21 16:06:56 2007
+++ /sys/src/9/pc/l.s	Wed Nov 21 16:06:53 2007
@@ -130,22 +130,31 @@
 TEXT mode32bit(SB), $0
 	/* At this point, the GDT setup is done. */
 
-	MOVL	$PADDR(CPU0PDB), DI		/* clear 4 pages for the tables etc. */
+	MOVL	$PADDR(CPU0PDB), DI		/* clear 8 pages for the tables etc. */
 	XORL	AX, AX
-	MOVL	$(4*BY2PG), CX
+	MOVL	$(8*BY2PG), CX
 	SHRL	$2, CX
 
 	CLD
 	REP;	STOSL
 
+	/*
+	 * create temporary page tables. must keep the amount of
+	 * memory mapped here in sync with memory.c:MemMin.
+	 */
 	MOVL	$PADDR(CPU0PDB), AX
 	ADDL	$PDO(KZERO), AX			/* page directory offset for KZERO */
 	MOVL	$PADDR(CPU0PTE), (AX)		/* PTE's for KZERO */
 	MOVL	$(PTEWRITE|PTEVALID), BX	/* page permissions */
 	ORL	BX, (AX)
+	ADDL	$4, AX				/* next 4mb */
+	MOVL	$PADDR(CPU0PTE+BY2PG), (AX)	/* PTE's for KZERO */
+	MOVL	$(PTEWRITE|PTEVALID), BX	/* page permissions */
+	ORL	BX, (AX)
+
 	MOVL	$PADDR(CPU0PTE), AX		/* first page of page table */
-	MOVL	$1024, CX			/* 1024 pages in 4MB */
+	MOVL	$2048, CX			/* 2048 pages in 8MB */
 _setpte:
 	MOVL	BX, (AX)
 	ADDL	$(1<>12) & 0x03FF)
 #define getpgcolor(a)	0
-
--- /sys/src/9/pc/memory.c	Wed Nov 21 16:07:14 2007
+++ /sys/src/9/pc/memory.c	Wed Nov 21 16:07:10 2007
@@ -21,26 +21,28 @@
 	KB		= 1024,
 
-	MemMinMB	= 4,		/* minimum physical memory (<=4MB) */
-	MemMaxMB	= 3*1024+768,	/* maximum physical memory to check */
-
-	NMemBase	= 10,
+	/*
+	 * "minimum" physical memory. this is actually the amount of memory in
+	 * the temporary page tables created in l.s. currently we create two pages
+	 * worth (8MB) for those monster venti kernels. for a kernel to boot, we
+	 * need MemMin > end+sizeof pagetables.
+	 */
+	MemMin		= 8*MB,
+	MemMax		= (3*1024+768)*MB,	/* maximum physical memory to check */
 };
 
-typedef struct Map Map;
-struct Map {
+typedef struct {
 	ulong	size;
 	ulong	addr;
-};
+}Map;
 
-typedef struct RMap RMap;
-struct RMap {
+typedef struct{
 	char*	name;
 	Map*	map;
 	Map*	mapend;
 	Lock;
-};
+}RMap;
 
 /*
  * Memory allocation tracking.
@@ -331,14 +333,16 @@
 	 * bootstrap processor MMU information and the limit is obtained from
 	 * the BIOS data area.
	 */
-	x = PADDR(CPU0MACH+BY2PG);
+	x = PADDR(CPU0END);
 	bda = (uchar*)KADDR(0x400);
 	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
 	mapfree(&rmapram, x, n);
 	memset(KADDR(x), 0, n);		/* keep us honest */
 
 	x = PADDR(PGROUND((ulong)end));
-	pa = MemMinMB*MB;
+	pa = MemMin;
+	if(x > pa)
+		panic("we're too big");
 	mapfree(&rmapram, x, pa-x);
 	memset(KADDR(x), 0, pa-x);	/* keep us honest */
 }
@@ -351,11 +355,11 @@
 
 	/*
 	 * The bootstrap code has has created a prototype page
-	 * table which maps the first MemMinMB of physical memory to KZERO.
+	 * table which maps the first MemMin of physical memory to KZERO.
 	 * The page directory is at m->pdb and the first page of
 	 * free memory is after the per-processor MMU information.
 	 */
-	pa = MemMinMB*MB;
+	pa = MemMin;
 
 	/*
 	 * Check if the extended memory size can be obtained from the CMOS.
@@ -368,7 +372,7 @@
 	if(maxmem == 0){
 		x = (nvramread(0x18)<<8)|nvramread(0x17);
 		if(x == 0 || x >= (63*KB))
-			maxpa = MemMaxMB*MB;
+			maxpa = MemMax;
 		else
 			maxpa = MB+x*KB;
 		if(maxpa < 24*MB)
@@ -378,7 +382,7 @@
 	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */
 
 	/*
-	 * March up memory from MemMinMB to maxpa 1MB at a time,
+	 * March up memory from MemMin to maxpa 1MB at a time,
 	 * mapping the first page and checking the page can
 	 * be written and read correctly. The page tables are created here
 	 * on the fly, allocating from low memory as necessary.
@@ -395,7 +399,7 @@
 	 * map each 4MB we scan to the virtual address range 4MB-8MB
 	 * while we are scanning.
 	 */
-	vbase = 4*MB;
+	vbase = MemMin;
 	while(pa < maxpa){
 		/*
 		 * Map the page. Use mapalloc(&rmapram, ...) to make
@@ -562,18 +566,18 @@
 	ulong *table, flags, maxkpa;
 
 	/*
-	 * Split any call crossing 4*MB to make below simpler.
+	 * Split any call crossing MemMin to make below simpler.
 	 */
-	if(base < 4*MB && len > 4*MB-base){
-		n = 4*MB - base;
+	if(base < MemMin && len > MemMin-base){
+		n = MemMin - base;
 		map(base, n, type);
-		map(4*MB, len-n, type);
+		map(MemMin, len-n, type);
 	}
 
 	/*
-	 * Let lowraminit and umbscan hash out the low 4MB.
+	 * Let lowraminit and umbscan hash out the low MemMin.
 	 */
-	if(base < 4*MB)
+	if(base < MemMin)
 		return;
 
 	/*
@@ -586,14 +590,14 @@
 	}
 
 	/*
-	 * Memory below CPU0MACH is reserved for the kernel
+	 * Memory below CPU0END is reserved for the kernel
 	 * and already mapped.
 	 */
-	if(base < PADDR(CPU0MACH)+BY2PG){
-		n = PADDR(CPU0MACH)+BY2PG - base;
+	if(base < PADDR(CPU0END)+BY2PG){
+		n = PADDR(CPU0END)+BY2PG - base;
 		if(len <= n)
 			return;
-		map(PADDR(CPU0MACH), len-n, type);
+		map(PADDR(CPU0END), len-n, type);
 		return;
 	}
 
@@ -640,7 +644,7 @@
 	 * bottom 4MB is already mapped - just twiddle flags.
 	 * (not currently used - see above)
 	 */
-	if(base < 4*MB){
+	if(base < MemMin){
 		table = KADDR(PPN(m->pdb[PDX(base)]));
 		e = base+len;
 		base = PPN(base);
@@ -697,21 +701,15 @@
 		return -1;
 	qsort(emap, nemap, sizeof emap[0], emapcmp);
-
-	if(getconf("*noe820print") == nil){
-		for(i=0; ibase, e->base+e->len);
-			if(e->type < nelem(etypes))
-				print("%s\n", etypes[e->type]);
-			else
-				print("type=%lud\n", e->type);
-		}
-	}
-
+	last = 0;
 	for(i=0; ibase, e->base+e->len);
+		if(e->type < nelem(etypes))
+			print("%s\n", etypes[e->type]);
+		else
+			print("type=%lud\n", e->type);
 		/*
 		 * pull out the info but only about the low 32 bits...
 		 */
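
The heart of the change is the relation the new memory.c comment spells out: everything below MemMin is covered by the temporary page tables that l.s now builds, so the page-rounded end of the loaded kernel (plus those tables) has to fit under it. The sketch below is not part of the patch; it simply restates the patch's new boot-time fit test as a user-level Plan 9 C program so an image can be checked before rebooting. The program name fatcheck is invented, PADDR and PGROUND are simplified stand-ins for the mem.h macros, and KZERO and BY2PG are the stock pc values; pass it the kernel's end symbol (a virtual address, taken from the image's symbol table).

	#include <u.h>
	#include <libc.h>

	/* stock pc values; PADDR/PGROUND are simplified stand-ins for mem.h */
	#define KZERO	0x80000000
	#define BY2PG	4096
	#define MB	(1024*1024)
	#define MemMin	(8*MB)		/* memory covered by the temporary page tables in l.s */

	#define PGROUND(s)	(((s)+(BY2PG-1))&~(BY2PG-1))
	#define PADDR(a)	((ulong)(a) & ~KZERO)

	void
	main(int argc, char *argv[])
	{
		ulong end, x;

		if(argc != 2){
			fprint(2, "usage: fatcheck end-address\n");
			exits("usage");
		}
		end = strtoul(argv[1], nil, 0);

		/* the same x > pa test the patch adds at boot time */
		x = PADDR(PGROUND(end));
		if(x > MemMin)
			print("too big: kernel end %.8lux > MemMin %.8lux\n", x, (ulong)MemMin);
		else
			print("fits: kernel end %.8lux <= MemMin %.8lux\n", x, (ulong)MemMin);
		exits(nil);
	}

When the answer is "too big", the added panic("we're too big") is what now stops the machine early and visibly, instead of letting pa-x wrap in the mapfree() and memset() calls that follow it.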