mirror of
git://sourceware.org/git/glibc.git
synced 2025-03-06 20:58:33 +01:00
support: Add support_next_to_fault_before support function
Refactor support_next_to_fault and add the support_next_to_fault_before function, which returns a buffer with a protected page before it, so that buffer underflow accesses can be tested. Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@redhat.com>
This commit is contained in:
parent
29803ed3ce
commit
fa53723cdb
2 changed files with 39 additions and 10 deletions
|
@ -1,4 +1,4 @@
|
||||||
/* Memory allocation next to an unmapped page.
|
/* Memory allocation either before or after an unmapped page.
|
||||||
Copyright (C) 2017-2025 Free Software Foundation, Inc.
|
Copyright (C) 2017-2025 Free Software Foundation, Inc.
|
||||||
This file is part of the GNU C Library.
|
This file is part of the GNU C Library.
|
||||||
|
|
||||||
|
@ -16,34 +16,58 @@
|
||||||
License along with the GNU C Library; if not, see
|
License along with the GNU C Library; if not, see
|
||||||
<https://www.gnu.org/licenses/>. */
|
<https://www.gnu.org/licenses/>. */
|
||||||
|
|
||||||
|
#include <stdbool.h>
|
||||||
#include <support/check.h>
|
#include <support/check.h>
|
||||||
#include <support/next_to_fault.h>
|
#include <support/next_to_fault.h>
|
||||||
#include <support/xunistd.h>
|
#include <support/xunistd.h>
|
||||||
#include <sys/mman.h>
|
#include <sys/mman.h>
|
||||||
#include <sys/param.h>
|
#include <sys/param.h>
|
||||||
|
|
||||||
struct support_next_to_fault
|
static struct support_next_to_fault
|
||||||
support_next_to_fault_allocate (size_t size)
|
support_next_to_fault_allocate_any (size_t size, bool fault_after_alloc)
|
||||||
{
|
{
|
||||||
long page_size = sysconf (_SC_PAGE_SIZE);
|
long page_size = sysconf (_SC_PAGE_SIZE);
|
||||||
|
long protect_offset = 0;
|
||||||
|
long buffer_offset = page_size;
|
||||||
|
|
||||||
TEST_VERIFY_EXIT (page_size > 0);
|
TEST_VERIFY_EXIT (page_size > 0);
|
||||||
struct support_next_to_fault result;
|
struct support_next_to_fault result;
|
||||||
result.region_size = roundup (size, page_size) + page_size;
|
result.region_size = roundup (size, page_size) + page_size;
|
||||||
if (size + page_size <= size || result.region_size <= size)
|
if (size + page_size <= size || result.region_size <= size)
|
||||||
FAIL_EXIT1 ("support_next_to_fault_allocate (%zu): overflow", size);
|
FAIL_EXIT1 ("%s (%zu): overflow", __func__, size);
|
||||||
result.region_start
|
result.region_start
|
||||||
= xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
|
= xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
|
||||||
MAP_PRIVATE | MAP_ANONYMOUS, -1);
|
MAP_PRIVATE | MAP_ANONYMOUS, -1);
|
||||||
/* Unmap the page after the allocation. */
|
|
||||||
xmprotect (result.region_start + (result.region_size - page_size),
|
if (fault_after_alloc)
|
||||||
page_size, PROT_NONE);
|
{
|
||||||
/* Align the allocation within the region so that it ends just
|
protect_offset = result.region_size - page_size;
|
||||||
before the PROT_NONE page. */
|
buffer_offset = protect_offset - size;
|
||||||
result.buffer = result.region_start + result.region_size - page_size - size;
|
}
|
||||||
|
|
||||||
|
/* Unmap the page before or after the allocation. */
|
||||||
|
xmprotect (result.region_start + protect_offset, page_size, PROT_NONE);
|
||||||
|
/* Align the allocation within the region so that it starts after or ends
|
||||||
|
just before the PROT_NONE page. */
|
||||||
|
result.buffer = result.region_start + buffer_offset;
|
||||||
result.length = size;
|
result.length = size;
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Unmap a page after the buffer.  */
|
||||||
|
struct support_next_to_fault
|
||||||
|
support_next_to_fault_allocate (size_t size)
|
||||||
|
{
|
||||||
|
return support_next_to_fault_allocate_any (size, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Unmap a page before the buffer.  */
|
||||||
|
struct support_next_to_fault
|
||||||
|
support_next_to_fault_allocate_before (size_t size)
|
||||||
|
{
|
||||||
|
return support_next_to_fault_allocate_any (size, false);
|
||||||
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
support_next_to_fault_free (struct support_next_to_fault *ntf)
|
support_next_to_fault_free (struct support_next_to_fault *ntf)
|
||||||
{
|
{
|
||||||
|
|
|
@ -41,6 +41,11 @@ struct support_next_to_fault
|
||||||
fault). */
|
fault). */
|
||||||
struct support_next_to_fault support_next_to_fault_allocate (size_t size);
|
struct support_next_to_fault support_next_to_fault_allocate (size_t size);
|
||||||
|
|
||||||
|
/* Allocate a buffer of SIZE bytes just *after* a page which is mapped
|
||||||
|
with PROT_NONE (so that under-running the buffer will cause a
|
||||||
|
fault). */
|
||||||
|
struct support_next_to_fault support_next_to_fault_allocate_before (size_t size);
|
||||||
|
|
||||||
/* Deallocate the memory region allocated by
|
/* Deallocate the memory region allocated by
|
||||||
support_next_to_fault_allocate or support_next_to_fault_allocate_before.  */
|
support_next_to_fault_allocate or support_next_to_fault_allocate_before.  */
|
||||||
void support_next_to_fault_free (struct support_next_to_fault *);
|
void support_next_to_fault_free (struct support_next_to_fault *);
|
||||||
|
|
Loading…
Add table
Reference in a new issue