[POWERPC] ppc405 Fix arithmetic rollover bug when memory size is under 16M

mmu_mapin_ram() loops over total_lowmem to set up page tables.  However, if
total_lowmem is less than 16M, the subtraction in the loop condition
(total_lowmem - LARGE_PAGE_SIZE_16M) rolls over and results in a number just
under 4G, because the arithmetic is done on unsigned values.
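
For illustration only (not the kernel code itself): a minimal user-space
sketch, assuming a 32-bit unsigned long as on ppc405, showing how the old
loop condition wraps when total_lowmem is only 8M:

	#include <stdio.h>

	#define LARGE_PAGE_SIZE_16M	0x01000000UL	/* 16M */

	int main(void)
	{
		unsigned long total_lowmem = 0x00800000UL;	/* 8M, less than 16M */
		unsigned long s = 0;

		/* The unsigned subtraction wraps instead of going negative,
		 * so the old loop condition is still true and the loop runs. */
		printf("total_lowmem - 16M = 0x%lx\n",
		       total_lowmem - LARGE_PAGE_SIZE_16M);
		printf("old loop entered: %s\n",
		       s <= (total_lowmem - LARGE_PAGE_SIZE_16M) ? "yes (bug)" : "no");
		return 0;
	}

On a 32-bit target the subtraction yields 0xff800000 (just under 4G), so the
old loop would try to map far more memory than actually exists.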

This patch rejigs the loops to count down instead of up, which eliminates
the bug.
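
The reworked loops count the bytes still to be mapped down from total_lowmem
instead of counting the mapped bytes up toward it, so no subtraction against
total_lowmem is needed.  Roughly (simplified from the diff below; the real
code also advances the virtual/physical addresses and writes the pmd
entries):

	unsigned long s = total_lowmem;		/* bytes left to map */

	while (s >= LARGE_PAGE_SIZE_16M) {
		/* ... map one 16M large page ... */
		s -= LARGE_PAGE_SIZE_16M;
	}
	while (s >= LARGE_PAGE_SIZE_4M) {
		/* ... map one 4M large page ... */
		s -= LARGE_PAGE_SIZE_4M;
	}
	return total_lowmem - s;		/* bytes actually mapped */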

Special thanks to Magnus Hjorth, who wrote the original patch to fix this
bug.  This patch improves on it by making the loop code simpler (which also
eliminates the possibility of another rollover at the high end) and by
applying the change to arch/powerpc as well.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Authored by Grant Likely on 2007-10-31 17:41:20 +11:00; committed by Josh Boyer
parent b98ac05d5e
commit bd942ba3db
2 changed files with 16 additions and 18 deletions


@@ -98,13 +98,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -116,10 +115,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -128,8 +127,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }


@@ -99,13 +99,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -117,10 +116,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -129,8 +128,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }