kernel-ark/lib/lmb.c
Becky Bruce e5f2709543 [LMB]: Make lmb support large physical addressing
Convert the lmb code to use u64 instead of unsigned long for physical
addresses and sizes.  This is needed to support large amounts of RAM
on 32-bit systems that support 36-bit physical addressing.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2008-02-13 16:58:39 -08:00
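
For illustration, a minimal stand-alone sketch (not kernel code; the 8 GiB
address is a made-up value) of the truncation this change avoids on 32-bit
builds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A 36-bit physical address (8 GiB), stored first in a 64-bit type. */
        uint64_t phys = 0x200000000ULL;

        /* On an ILP32 target, unsigned long is 32 bits wide, so the
         * high-order bits are silently discarded. */
        unsigned long truncated = (unsigned long)phys;

        printf("u64:           0x%llx\n", (unsigned long long)phys);
        printf("unsigned long: 0x%lx\n", truncated); /* 0x0 on ILP32 */
        return 0;
}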

/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) LMB_DBG(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE 0

struct lmb lmb;

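/* Dump the memory and reserved region tables (DEBUG builds only). */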
void lmb_dump_all(void)
{
#ifdef DEBUG
        unsigned long i;

        DBG("lmb_dump_all:\n");
        DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
        DBG(" memory.size = 0x%llx\n",
            (unsigned long long)lmb.memory.size);
        for (i = 0; i < lmb.memory.cnt; i++) {
                DBG(" memory.region[0x%lx].base = 0x%llx\n",
                    i, (unsigned long long)lmb.memory.region[i].base);
                DBG(" .size = 0x%llx\n",
                    (unsigned long long)lmb.memory.region[i].size);
        }

        DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
        DBG(" reserved.size = 0x%llx\n",
            (unsigned long long)lmb.reserved.size);
        for (i = 0; i < lmb.reserved.cnt; i++) {
                DBG(" reserved.region[0x%lx].base = 0x%llx\n",
                    i, (unsigned long long)lmb.reserved.region[i].base);
                DBG(" .size = 0x%llx\n",
                    (unsigned long long)lmb.reserved.region[i].size);
        }
#endif /* DEBUG */
}

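/* Return non-zero if [base1, base1+size1) and [base2, base2+size2) overlap. */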
static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
                                              u64 base2, u64 size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

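/* Return 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if they are not adjacent. */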
static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
                                      u64 base2, u64 size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}

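/* As lmb_addrs_adjacent(), but for two entries of the same region table. */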
static long __init lmb_regions_adjacent(struct lmb_region *rgn,
                                        unsigned long r1, unsigned long r2)
{
        u64 base1 = rgn->region[r1].base;
        u64 size1 = rgn->region[r1].size;
        u64 base2 = rgn->region[r2].base;
        u64 size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}

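/* Drop entry r from the table by sliding all later entries down one slot. */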
static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
        }
        rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
                                        unsigned long r1, unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
         */
        lmb.memory.region[0].base = 0;
        lmb.memory.region[0].size = 0;
        lmb.memory.cnt = 1;

        /* Ditto. */
        lmb.reserved.region[0].base = 0;
        lmb.reserved.region[0].size = 0;
        lmb.reserved.cnt = 1;
}

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
        int i;

        lmb.memory.size = 0;

        for (i = 0; i < lmb.memory.cnt; i++)
                lmb.memory.size += lmb.memory.region[i].size;
}

/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
                return 0;
        }

        /* First try and coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                u64 rgnbase = rgn->region[i].base;
                u64 rgnsize = rgn->region[i].size;

                if ((rgnbase == base) && (rgnsize == size))
                        /* Already have this region, so we're done */
                        return 0;

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        /* New region sits immediately below region i:
                         * grow region i downwards to absorb it. */
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        /* New region sits immediately above region i:
                         * grow region i upwards to absorb it. */
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
        }

        /* The merge may have made region i adjacent to its successor;
         * coalesce those too. */
        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
                lmb_coalesce_regions(rgn, i, i + 1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i + 1].base = rgn->region[i].base;
                        rgn->region[i + 1].size = rgn->region[i].size;
                } else {
                        rgn->region[i + 1].base = base;
                        rgn->region[i + 1].size = size;
                        break;
                }
        }

        /* If the new region has the lowest base of all, the loop above
         * never stored it: place it in slot 0, whose old contents have
         * already been copied to slot 1. */
        if (base < rgn->region[0].base) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
        }
        rgn->cnt++;

        return 0;
}

/* This routine may be called with relocation disabled. */
long __init lmb_add(u64 base, u64 size)
{
        struct lmb_region *_rgn = &(lmb.memory);

        /* On pSeries LPAR systems, the first LMB is our RMO region. */
        if (base == 0)
                lmb.rmo_size = size;

        return lmb_add_region(_rgn, base, size);
}

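/* Mark [base, base+size) as reserved so the allocator will not hand it out. */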
long __init lmb_reserve(u64 base, u64 size)
{
        struct lmb_region *_rgn = &(lmb.reserved);

        BUG_ON(0 == size);

        return lmb_add_region(_rgn, base, size);
}

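/* Return the index of the first region in rgn overlapping [base, base+size),
 * or -1 if there is no overlap. */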
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                u64 rgnbase = rgn->region[i].base;
                u64 rgnsize = rgn->region[i].size;

                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}

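/* Allocate size bytes at the given alignment, with no caller-imposed
 * upper address limit (LMB_REAL_LIMIT still applies). */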
u64 __init lmb_alloc(u64 size, u64 align)
{
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

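/* As __lmb_alloc_base(), but panic instead of returning 0 on failure. */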
u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
        u64 alloc;

        alloc = __lmb_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long)size, (unsigned long long)max_addr);

        return alloc;
}

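/* Helpers to round addr down/up to an alignment boundary; "size" must be
 * a power of two. */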
static u64 lmb_align_down(u64 addr, u64 size)
{
        return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
        return (addr + (size - 1)) & ~(size - 1);
}

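/* Allocate size bytes at the given alignment, at the highest address no
 * greater than max_addr.  Memory regions are walked from the top down,
 * and the candidate range slides below any reserved region it collides
 * with.  Returns the base address on success, or 0 on failure. */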
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
        long i, j;
        u64 base = 0;

        BUG_ON(0 == size);

        /* On some platforms, make sure we allocate lowmem */
        if (max_addr == LMB_ALLOC_ANYWHERE)
                max_addr = LMB_REAL_LIMIT;

        for (i = lmb.memory.cnt - 1; i >= 0; i--) {
                u64 lmbbase = lmb.memory.region[i].base;
                u64 lmbsize = lmb.memory.region[i].size;

                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = lmb_align_down(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = min(lmbbase + lmbsize, max_addr);
                        base = lmb_align_down(base - size, align);
                } else
                        continue;

                /* Slide the candidate range down below each reserved
                 * region it overlaps until it is clear of all of them. */
                while ((lmbbase <= base) &&
                       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
                        base = lmb_align_down(lmb.reserved.region[j].base - size,
                                              align);

                if ((base != 0) && (lmbbase <= base))
                        break;
        }

        if (i < 0)
                return 0;

        if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
                return 0;

        return base;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
        return lmb.memory.size;
}

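/* Return the first address past the highest (last) memory region. */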
u64 __init lmb_end_of_DRAM(void)
{
        int idx = lmb.memory.cnt - 1;

        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
        unsigned long i;
        u64 limit;
        struct lmb_property *p;

        if (!memory_limit)
                return;

        /* Truncate the lmb regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < lmb.memory.cnt; i++) {
                if (limit > lmb.memory.region[i].size) {
                        limit -= lmb.memory.region[i].size;
                        continue;
                }

                lmb.memory.region[i].size = limit;
                lmb.memory.cnt = i + 1;
                break;
        }

        if (lmb.memory.region[0].size < lmb.rmo_size)
                lmb.rmo_size = lmb.memory.region[0].size;

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                p = &lmb.reserved.region[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        lmb_remove_region(&lmb.reserved, i);
                        i--;
                }
        }
}

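/* Return 1 if addr lies within any reserved region, 0 otherwise. */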
int __init lmb_is_reserved(u64 addr)
{
        int i;

        for (i = 0; i < lmb.reserved.cnt; i++) {
                u64 upper = lmb.reserved.region[i].base +
                        lmb.reserved.region[i].size - 1;
                if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
                        return 1;
        }
        return 0;
}