Merge branch 'slab/for-6.2/alloc_size' into slab/for-next
Two patches from Kees Cook [1]:

These patches work around a deficiency in GCC (>=11) and Clang (<16)
where the __alloc_size attribute does not apply to inlines:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96503

This manifests as reduced overflow detection coverage for many
allocation sites under CONFIG_FORTIFY_SOURCE=y, where the allocation
size was not actually being propagated to
__builtin_dynamic_object_size().

[1] https://lore.kernel.org/all/20221118034713.gonna.754-kees@kernel.org/
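The failure mode is easy to reproduce outside the kernel. Below is a
minimal userspace sketch, assuming GCC >= 12 or a recent Clang (for
__builtin_dynamic_object_size()) and -O2 (so the inlines actually
fold); every function name in it is a hypothetical stand-in, not
kernel API. On an affected compiler, a size attribute placed on an
inline wrapper is lost once the wrapper is inlined, while the same
attribute on the out-of-line callee survives — which is the shape the
series moves the kmalloc() helpers toward:

/*
 * Userspace sketch of the __alloc_size-on-inlines deficiency.
 * Compile with: cc -O2 sketch.c
 * All names here are hypothetical, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

/* Out-of-line allocator with no size attribute. */
__attribute__((noinline))
static void *raw_alloc(size_t size)
{
	return malloc(size);
}

/*
 * Inline wrapper carrying alloc_size: with affected compilers the
 * attribute is dropped once the wrapper is inlined, leaving only an
 * unattributed raw_alloc() call at the call site.
 */
static inline __attribute__((alloc_size(1)))
void *wrapped_alloc(size_t size)
{
	return raw_alloc(size);
}

/*
 * The attribute on the out-of-line callee itself: inlining the
 * wrapper still leaves an attributed call for the fortifier to see.
 */
__attribute__((noinline, alloc_size(1)))
static void *attributed_alloc(size_t size)
{
	return malloc(size);
}

static inline void *fixed_alloc(size_t size)
{
	return attributed_alloc(size);
}

int main(void)
{
	void *a = wrapped_alloc(32);
	void *b = fixed_alloc(32);

	/* (size_t)-1 means "size unknown" to the fortifier. */
	printf("wrapped: %zu\n", __builtin_dynamic_object_size(a, 0));
	printf("fixed:   %zu\n", __builtin_dynamic_object_size(b, 0));

	free(a);
	free(b);
	return 0;
}

On an affected compiler this should print 18446744073709551615
(i.e. (size_t)-1, "unknown") for the wrapped case and 32 for the
fixed one.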
@@ -547,42 +547,42 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * Try really hard to succeed the allocation but fail
  * eventually.
  */
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
-#endif
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
-		index = kmalloc_index(size);
 
-		if (!index)
-			return ZERO_SIZE_PTR;
-
+		index = kmalloc_index(size);
 		return kmalloc_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, size);
-#endif
 	}
 	return __kmalloc(size, flags);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+		return kmalloc_large(size, flags);
+
+	return __kmalloc(size, flags);
+}
+#endif
 
 #ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size)) {
+	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large_node(size, flags, node);
 
 		index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
 		return kmalloc_node_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, node, size);
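Two behavioral points of the new inline fall out of the diff: a
constant, nonzero size is resolved to a kmalloc cache index entirely
at compile time, and a constant zero no longer gets an inline
ZERO_SIZE_PTR short-circuit but falls through to __kmalloc(), whose
slow path already handles zero-size requests. Below is a small
userspace sketch of this dispatch pattern (all names hypothetical,
the bucket scheme simplified to bare powers of two; compile with -O2
so __builtin_constant_p() folds inside the inline):

/*
 * Userspace sketch of the constant-size dispatch in the new kmalloc()
 * inline. Compile with: cc -O2 sketch.c
 * All names here are hypothetical stand-ins, not kernel API.
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_CACHE_SIZE 8192	/* stand-in for KMALLOC_MAX_CACHE_SIZE */

/* Stand-ins for kmalloc_trace(), __kmalloc() and kmalloc_large(). */
static void *bucket_alloc(unsigned int index)
{
	printf("bucket %u (%lu bytes)\n", index, 1UL << index);
	return NULL;
}

static void *slow_alloc(size_t size)
{
	printf("slow path, size %zu\n", size);
	return NULL;
}

static void *large_alloc(size_t size)
{
	printf("large, size %zu\n", size);
	return NULL;
}

/* Simplified kmalloc_index(): log2 of the smallest power-of-two
 * bucket >= size, with an 8-byte minimum. Folds to a constant when
 * size is a compile-time constant. */
static inline unsigned int bucket_index(size_t size)
{
	unsigned int index = 3;

	while ((1UL << index) < size)
		index++;
	return index;
}

static inline void *my_kmalloc(size_t size)
{
	if (__builtin_constant_p(size) && size) {
		if (size > MAX_CACHE_SIZE)
			return large_alloc(size);
		return bucket_alloc(bucket_index(size));
	}
	/* Non-constant sizes, and now also constant 0, land here. */
	return slow_alloc(size);
}

int main(void)
{
	my_kmalloc(24);		/* constant, nonzero: bucket 5 (32 bytes) */
	my_kmalloc(0);		/* constant zero: slow path, handled at runtime */
	my_kmalloc(65536);	/* constant, above the cache limit: large path */
	return 0;
}

With -O2, the first and third calls compile down to direct calls of
the chosen path with no runtime size checks, mirroring how the kernel
inline turns constant-size kmalloc() calls into direct
kmalloc_trace()/kmalloc_large() calls.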