/proc/kpageflags: prevent an integer overflow in stable_page_flags()
stable_page_flags() returns kpageflags info as a u64, but internally it
builds the value with "1 << KPF_*" expressions, which are evaluated as
int.  The type mismatch causes no visible problem today, but it will
once bit 32 or higher is set, as a subsequent patch does.  Use BIT_ULL
to avoid future overflow issues.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Toshiki Fukasawa <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Alexey Dobriyan <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Junichi Nomura <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Stephen Rothwell <[email protected]>
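
For illustration only (not part of the patch), a minimal userspace sketch of
the promotion problem the commit message describes: the literal 1 is an int,
so the shift is done in 32-bit arithmetic before the result is widened to
u64, whereas a 1ULL shift (which is what BIT_ULL() amounts to) is performed
in 64 bits.  The BIT_ULL definition and the bit numbers below are assumptions
chosen for the demo, not code from the patch.

/*
 * Illustration only: why "1 << nr" is unsafe for nr >= 32 while
 * BIT_ULL(nr) is not.  BIT_ULL here mirrors a plain 1ULL shift.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	uint64_t lo = 1 << 20;		/* fine: bit 20 fits in an int shift */
	uint64_t hi = BIT_ULL(32);	/* 0x100000000: needs the 64-bit shift */

	/* "1 << 32" here would be undefined behaviour: the shift is done in int. */
	printf("1 << 20     = %#llx\n", (unsigned long long)lo);
	printf("BIT_ULL(32) = %#llx\n", (unsigned long long)hi);
	return 0;
}
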
 fs/proc/page.c | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/fs/proc/page.c b/fs/proc/page.c
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -115,7 +115,7 @@ u64 stable_page_flags(struct page *page)
 	 * it differentiates a memory hole from a page with no flags
 	 */
 	if (!page)
-		return 1 << KPF_NOPAGE;
+		return BIT_ULL(KPF_NOPAGE);
 
 	k = page->flags;
 	u = 0;
@@ -127,22 +127,22 @@ u64 stable_page_flags(struct page *page)
 	 * simple test in page_mapped() is not enough.
 	 */
 	if (!PageSlab(page) && page_mapped(page))
-		u |= 1 << KPF_MMAP;
+		u |= BIT_ULL(KPF_MMAP);
 	if (PageAnon(page))
-		u |= 1 << KPF_ANON;
+		u |= BIT_ULL(KPF_ANON);
 	if (PageKsm(page))
-		u |= 1 << KPF_KSM;
+		u |= BIT_ULL(KPF_KSM);
 
 	/*
 	 * compound pages: export both head/tail info
 	 * they together define a compound page's start/end pos and order
 	 */
 	if (PageHead(page))
-		u |= 1 << KPF_COMPOUND_HEAD;
+		u |= BIT_ULL(KPF_COMPOUND_HEAD);
 	if (PageTail(page))
-		u |= 1 << KPF_COMPOUND_TAIL;
+		u |= BIT_ULL(KPF_COMPOUND_TAIL);
 	if (PageHuge(page))
-		u |= 1 << KPF_HUGE;
+		u |= BIT_ULL(KPF_HUGE);
 	/*
 	 * PageTransCompound can be true for non-huge compound pages (slab
 	 * pages or pages allocated by drivers with __GFP_COMP) because it
@@ -153,38 +153,37 @@ u64 stable_page_flags(struct page *page)
 		struct page *head = compound_head(page);
 
 		if (PageLRU(head) || PageAnon(head))
-			u |= 1 << KPF_THP;
+			u |= BIT_ULL(KPF_THP);
 		else if (is_huge_zero_page(head)) {
-			u |= 1 << KPF_ZERO_PAGE;
-			u |= 1 << KPF_THP;
+			u |= BIT_ULL(KPF_ZERO_PAGE);
+			u |= BIT_ULL(KPF_THP);
 		}
 	} else if (is_zero_pfn(page_to_pfn(page)))
-		u |= 1 << KPF_ZERO_PAGE;
-
+		u |= BIT_ULL(KPF_ZERO_PAGE);
 
 	/*
 	 * Caveats on high order pages: page->_refcount will only be set
 	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
 	 * SLOB won't set PG_slab at all on compound pages.
	 */
 	if (PageBuddy(page))
-		u |= 1 << KPF_BUDDY;
+		u |= BIT_ULL(KPF_BUDDY);
 	else if (page_count(page) == 0 && is_free_buddy_page(page))
-		u |= 1 << KPF_BUDDY;
+		u |= BIT_ULL(KPF_BUDDY);
 
 	if (PageOffline(page))
-		u |= 1 << KPF_OFFLINE;
+		u |= BIT_ULL(KPF_OFFLINE);
 	if (PageTable(page))
-		u |= 1 << KPF_PGTABLE;
+		u |= BIT_ULL(KPF_PGTABLE);
 
 	if (page_is_idle(page))
-		u |= 1 << KPF_IDLE;
+		u |= BIT_ULL(KPF_IDLE);
 
 	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
 
 	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
 	if (PageTail(page) && PageSlab(compound_head(page)))
-		u |= 1 << KPF_SLAB;
+		u |= BIT_ULL(KPF_SLAB);
 
 	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
 	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
@@ -197,7 +196,7 @@ u64 stable_page_flags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
 
 	if (PageSwapCache(page))
-		u |= 1 << KPF_SWAPCACHE;
+		u |= BIT_ULL(KPF_SWAPCACHE);
 	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
 
 	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
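
As context for why the full 64-bit width matters, here is a hedged userspace
sketch of the consumer side: /proc/kpageflags exposes one 64-bit flag word
per page frame (the value stable_page_flags() builds), indexed by PFN.  The
KPF_* bit numbers below follow Documentation/admin-guide/mm/pagemap.rst and
are an assumption for illustration, not part of this patch; reading the file
typically requires root.

/* Illustration only: dump the kpageflags word for one PFN. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BIT_ULL(nr)	(1ULL << (nr))
#define KPF_BUDDY	10	/* bit numbers per Documentation/admin-guide/mm/pagemap.rst */
#define KPF_NOPAGE	20

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}

	uint64_t pfn = strtoull(argv[1], NULL, 0);
	int fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/kpageflags");
		return 1;
	}

	uint64_t flags;
	/* Each PFN has an 8-byte entry, so read at offset pfn * sizeof(u64). */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != (ssize_t)sizeof(flags)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	printf("pfn %#llx: flags %#llx%s%s\n",
	       (unsigned long long)pfn, (unsigned long long)flags,
	       (flags & BIT_ULL(KPF_NOPAGE)) ? " NOPAGE" : "",
	       (flags & BIT_ULL(KPF_BUDDY)) ? " BUDDY" : "");
	return 0;
}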
