s390/mem_detect: remove artificial kdump memory types
Simplify the memory detection code a bit by removing the CHUNK_OLDMEM and CHUNK_CRASHK memory types. They are not needed. All that is needed is a mechanism to insert holes into the detected memory.

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent
d3383632d4
commit
996b4a7d8f
5 changed files with 41 additions and 103 deletions
|
@ -33,8 +33,6 @@
|
||||||
|
|
||||||
#define CHUNK_READ_WRITE 0
|
#define CHUNK_READ_WRITE 0
|
||||||
#define CHUNK_READ_ONLY 1
|
#define CHUNK_READ_ONLY 1
|
||||||
#define CHUNK_OLDMEM 4
|
|
||||||
#define CHUNK_CRASHK 5
|
|
||||||
|
|
||||||
struct mem_chunk {
|
struct mem_chunk {
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
@ -47,8 +45,8 @@ extern int memory_end_set;
|
||||||
extern unsigned long memory_end;
|
extern unsigned long memory_end;
|
||||||
|
|
||||||
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
|
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
|
||||||
void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
|
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
|
||||||
unsigned long size, int type);
|
unsigned long size);
|
||||||
|
|
||||||
#define PRIMARY_SPACE_MODE 0
|
#define PRIMARY_SPACE_MODE 0
|
||||||
#define ACCESS_REGISTER_MODE 1
|
#define ACCESS_REGISTER_MODE 1
|
||||||
|
|
|
@ -89,7 +89,7 @@ static struct mem_chunk *get_memory_layout(void)
|
||||||
|
|
||||||
chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
|
chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
|
||||||
detect_memory_layout(chunk_array, 0);
|
detect_memory_layout(chunk_array, 0);
|
||||||
create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
|
create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
|
||||||
return chunk_array;
|
return chunk_array;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -344,7 +344,7 @@ static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
|
||||||
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
mem_chunk = &chunk_array[i];
|
mem_chunk = &chunk_array[i];
|
||||||
if (mem_chunk->size == 0)
|
if (mem_chunk->size == 0)
|
||||||
break;
|
continue;
|
||||||
if (chunk_array[i].type != CHUNK_READ_WRITE &&
|
if (chunk_array[i].type != CHUNK_READ_WRITE &&
|
||||||
chunk_array[i].type != CHUNK_READ_ONLY)
|
chunk_array[i].type != CHUNK_READ_ONLY)
|
||||||
continue;
|
continue;
|
||||||
|
|
|
@ -463,14 +463,10 @@ static void __init setup_resources(void)
|
||||||
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
if (!memory_chunk[i].size)
|
if (!memory_chunk[i].size)
|
||||||
continue;
|
continue;
|
||||||
if (memory_chunk[i].type == CHUNK_OLDMEM ||
|
|
||||||
memory_chunk[i].type == CHUNK_CRASHK)
|
|
||||||
continue;
|
|
||||||
res = alloc_bootmem_low(sizeof(*res));
|
res = alloc_bootmem_low(sizeof(*res));
|
||||||
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
|
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
|
||||||
switch (memory_chunk[i].type) {
|
switch (memory_chunk[i].type) {
|
||||||
case CHUNK_READ_WRITE:
|
case CHUNK_READ_WRITE:
|
||||||
case CHUNK_CRASHK:
|
|
||||||
res->name = "System RAM";
|
res->name = "System RAM";
|
||||||
break;
|
break;
|
||||||
case CHUNK_READ_ONLY:
|
case CHUNK_READ_ONLY:
|
||||||
|
@ -527,7 +523,7 @@ static void __init setup_memory_end(void)
|
||||||
unsigned long align;
|
unsigned long align;
|
||||||
|
|
||||||
chunk = &memory_chunk[i];
|
chunk = &memory_chunk[i];
|
||||||
if (chunk->type == CHUNK_OLDMEM)
|
if (!chunk->size)
|
||||||
continue;
|
continue;
|
||||||
align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
|
align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
|
||||||
start = (chunk->addr + align - 1) & ~(align - 1);
|
start = (chunk->addr + align - 1) & ~(align - 1);
|
||||||
|
@ -579,7 +575,7 @@ static void __init setup_memory_end(void)
|
||||||
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
struct mem_chunk *chunk = &memory_chunk[i];
|
struct mem_chunk *chunk = &memory_chunk[i];
|
||||||
|
|
||||||
if (chunk->type == CHUNK_OLDMEM)
|
if (!chunk->size)
|
||||||
continue;
|
continue;
|
||||||
if (chunk->addr >= memory_end) {
|
if (chunk->addr >= memory_end) {
|
||||||
memset(chunk, 0, sizeof(*chunk));
|
memset(chunk, 0, sizeof(*chunk));
|
||||||
|
@ -680,15 +676,6 @@ static int __init verify_crash_base(unsigned long crash_base,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Reserve kdump memory by creating a memory hole in the mem_chunk array
|
|
||||||
*/
|
|
||||||
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
|
|
||||||
int type)
|
|
||||||
{
|
|
||||||
create_mem_hole(memory_chunk, addr, size, type);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* When kdump is enabled, we have to ensure that no memory from
|
* When kdump is enabled, we have to ensure that no memory from
|
||||||
* the area [0 - crashkernel memory size] and
|
* the area [0 - crashkernel memory size] and
|
||||||
|
@ -730,8 +717,8 @@ static void reserve_oldmem(void)
|
||||||
|
|
||||||
real_size = max(real_size, chunk->addr + chunk->size);
|
real_size = max(real_size, chunk->addr + chunk->size);
|
||||||
}
|
}
|
||||||
reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
|
create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
|
||||||
reserve_kdump_bootmem(OLDMEM_SIZE, real_size - OLDMEM_SIZE, CHUNK_OLDMEM);
|
create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
|
||||||
if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
|
if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
|
||||||
saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
|
saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
|
||||||
else
|
else
|
||||||
|
@ -774,7 +761,7 @@ static void __init reserve_crashkernel(void)
|
||||||
crashk_res.start = crash_base;
|
crashk_res.start = crash_base;
|
||||||
crashk_res.end = crash_base + crash_size - 1;
|
crashk_res.end = crash_base + crash_size - 1;
|
||||||
insert_resource(&iomem_resource, &crashk_res);
|
insert_resource(&iomem_resource, &crashk_res);
|
||||||
reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
|
create_mem_hole(memory_chunk, crash_base, crash_size);
|
||||||
pr_info("Reserving %lluMB of memory at %lluMB "
|
pr_info("Reserving %lluMB of memory at %lluMB "
|
||||||
"for crashkernel (System RAM: %luMB)\n",
|
"for crashkernel (System RAM: %luMB)\n",
|
||||||
crash_size >> 20, crash_base >> 20, memory_end >> 20);
|
crash_size >> 20, crash_base >> 20, memory_end >> 20);
|
||||||
|
@ -846,11 +833,10 @@ static void __init setup_memory(void)
|
||||||
* Register RAM areas with the bootmem allocator.
|
* Register RAM areas with the bootmem allocator.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
unsigned long start_chunk, end_chunk, pfn;
|
unsigned long start_chunk, end_chunk, pfn;
|
||||||
|
|
||||||
if (memory_chunk[i].type != CHUNK_READ_WRITE &&
|
if (!memory_chunk[i].size)
|
||||||
memory_chunk[i].type != CHUNK_CRASHK)
|
|
||||||
continue;
|
continue;
|
||||||
start_chunk = PFN_DOWN(memory_chunk[i].addr);
|
start_chunk = PFN_DOWN(memory_chunk[i].addr);
|
||||||
end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
|
end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
|
||||||
|
|
|
@ -95,82 +95,40 @@ out:
|
||||||
EXPORT_SYMBOL(detect_memory_layout);
|
EXPORT_SYMBOL(detect_memory_layout);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Move memory chunks array from index "from" to index "to"
|
* Create memory hole with given address and size.
|
||||||
*/
|
*/
|
||||||
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
|
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
|
||||||
|
unsigned long size)
|
||||||
{
|
{
|
||||||
int cnt = MEMORY_CHUNKS - to;
|
int i;
|
||||||
|
|
||||||
memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Initialize memory chunk
|
|
||||||
*/
|
|
||||||
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
|
|
||||||
unsigned long size, int type)
|
|
||||||
{
|
|
||||||
chunk->type = type;
|
|
||||||
chunk->addr = addr;
|
|
||||||
chunk->size = size;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create memory hole with given address, size, and type
|
|
||||||
*/
|
|
||||||
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
|
|
||||||
unsigned long size, int type)
|
|
||||||
{
|
|
||||||
unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
|
|
||||||
int i, ch_type;
|
|
||||||
|
|
||||||
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
if (chunk[i].size == 0)
|
struct mem_chunk *chunk = &mem_chunk[i];
|
||||||
|
|
||||||
|
if (chunk->size == 0)
|
||||||
continue;
|
continue;
|
||||||
|
if (addr > chunk->addr + chunk->size)
|
||||||
|
continue;
|
||||||
|
if (addr + size <= chunk->addr)
|
||||||
|
continue;
|
||||||
|
/* Split */
|
||||||
|
if ((addr > chunk->addr) &&
|
||||||
|
(addr + size < chunk->addr + chunk->size)) {
|
||||||
|
struct mem_chunk *new = chunk + 1;
|
||||||
|
|
||||||
/* Define chunk properties */
|
memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
|
||||||
ch_start = chunk[i].addr;
|
new->addr = addr + size;
|
||||||
ch_size = chunk[i].size;
|
new->size = chunk->addr + chunk->size - new->addr;
|
||||||
ch_end = ch_start + ch_size - 1;
|
chunk->size = addr - chunk->addr;
|
||||||
ch_type = chunk[i].type;
|
continue;
|
||||||
|
} else if ((addr <= chunk->addr) &&
|
||||||
/* Is memory chunk hit by memory hole? */
|
(addr + size >= chunk->addr + chunk->size)) {
|
||||||
if (addr + size <= ch_start)
|
memset(chunk, 0 , sizeof(*chunk));
|
||||||
continue; /* No: memory hole in front of chunk */
|
} else if (addr + size < chunk->addr + chunk->size) {
|
||||||
if (addr > ch_end)
|
chunk->size = chunk->addr + chunk->size - addr - size;
|
||||||
continue; /* No: memory hole after chunk */
|
chunk->addr = addr + size;
|
||||||
|
} else if (addr > chunk->addr) {
|
||||||
/* Yes: Define local hole properties */
|
chunk->size = addr - chunk->addr;
|
||||||
lh_start = max(addr, chunk[i].addr);
|
|
||||||
lh_end = min(addr + size - 1, ch_end);
|
|
||||||
lh_size = lh_end - lh_start + 1;
|
|
||||||
|
|
||||||
if (lh_start == ch_start && lh_end == ch_end) {
|
|
||||||
/* Hole covers complete memory chunk */
|
|
||||||
mem_chunk_init(&chunk[i], lh_start, lh_size, type);
|
|
||||||
} else if (lh_end == ch_end) {
|
|
||||||
/* Hole starts in memory chunk and covers chunk end */
|
|
||||||
mem_chunk_move(chunk, i + 1, i);
|
|
||||||
mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
|
|
||||||
ch_type);
|
|
||||||
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
|
|
||||||
i += 1;
|
|
||||||
} else if (lh_start == ch_start) {
|
|
||||||
/* Hole ends in memory chunk */
|
|
||||||
mem_chunk_move(chunk, i + 1, i);
|
|
||||||
mem_chunk_init(&chunk[i], lh_start, lh_size, type);
|
|
||||||
mem_chunk_init(&chunk[i + 1], lh_end + 1,
|
|
||||||
ch_size - lh_size, ch_type);
|
|
||||||
break;
|
|
||||||
} else {
|
|
||||||
/* Hole splits memory chunk */
|
|
||||||
mem_chunk_move(chunk, i + 2, i);
|
|
||||||
mem_chunk_init(&chunk[i], ch_start,
|
|
||||||
lh_start - ch_start, ch_type);
|
|
||||||
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
|
|
||||||
mem_chunk_init(&chunk[i + 2], lh_end + 1,
|
|
||||||
ch_end - lh_end, ch_type);
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -375,9 +375,8 @@ void __init vmem_map_init(void)
|
||||||
|
|
||||||
ro_start = PFN_ALIGN((unsigned long)&_stext);
|
ro_start = PFN_ALIGN((unsigned long)&_stext);
|
||||||
ro_end = (unsigned long)&_eshared & PAGE_MASK;
|
ro_end = (unsigned long)&_eshared & PAGE_MASK;
|
||||||
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
if (memory_chunk[i].type == CHUNK_CRASHK ||
|
if (!memory_chunk[i].size)
|
||||||
memory_chunk[i].type == CHUNK_OLDMEM)
|
|
||||||
continue;
|
continue;
|
||||||
start = memory_chunk[i].addr;
|
start = memory_chunk[i].addr;
|
||||||
end = memory_chunk[i].addr + memory_chunk[i].size;
|
end = memory_chunk[i].addr + memory_chunk[i].size;
|
||||||
|
@ -412,9 +411,6 @@ static int __init vmem_convert_memory_chunk(void)
|
||||||
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
for (i = 0; i < MEMORY_CHUNKS; i++) {
|
||||||
if (!memory_chunk[i].size)
|
if (!memory_chunk[i].size)
|
||||||
continue;
|
continue;
|
||||||
if (memory_chunk[i].type == CHUNK_CRASHK ||
|
|
||||||
memory_chunk[i].type == CHUNK_OLDMEM)
|
|
||||||
continue;
|
|
||||||
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
|
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
|
||||||
if (!seg)
|
if (!seg)
|
||||||
panic("Out of memory...\n");
|
panic("Out of memory...\n");
|
||||||
|
|
Loading…
Add table
Reference in a new issue