mm: gup: remove FOLL_SPLIT

Since commit 5a52c9df62 ("uprobe: use FOLL_SPLIT_PMD instead of
FOLL_SPLIT") and commit ba925fa350 ("s390/gmap: improve THP splitting"),
FOLL_SPLIT has not been used anymore.  Remove the dead code.

Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4066c11948
parent 1d4b0166e3
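For context on what replaced the removed flag: a GUP caller that used to pass
FOLL_SPLIT now passes FOLL_SPLIT_PMD, which splits only the PMD-level mapping
instead of the compound page itself.  Below is a minimal sketch of such a
caller, loosely modeled on the uprobes conversion; the flags, error handling,
and the surrounding variables (mm, vaddr) are illustrative and not taken from
this commit.

	struct page *page;
	struct vm_area_struct *vma;
	int ret;

	/*
	 * FOLL_SPLIT_PMD remaps the THP with normal PTEs before the
	 * lookup, so the caller can treat the returned page as a base
	 * page without splitting the underlying compound page (which is
	 * what the removed FOLL_SPLIT did via split_huge_page()).
	 */
	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_FORCE | FOLL_SPLIT_PMD,
				    &page, &vma, NULL);
	if (ret != 1)
		return ret < 0 ? ret : -EBUSY;
	/* ... use or modify the page ... */
	put_page(page);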
Documentation/vm/transhuge.rst
@@ -53,11 +53,6 @@ prevent the page from being split by anyone.
 of handling GUP on hugetlbfs will also work fine on transparent
 hugepage backed mappings.
 
-In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as a parameter to
-follow_page, so that it will split the hugepages before returning
-them.
-
 Graceful fallback
 =================
 
include/linux/mm.h
@@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
 #define FOLL_POPULATE	0x40	/* fault in page */
-#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
mm/gup.c
@@ -516,18 +516,6 @@ retry:
 		}
 	}
 
-	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-		get_page(page);
-		pte_unmap_unlock(ptep, ptl);
-		lock_page(page);
-		ret = split_huge_page(page);
-		unlock_page(page);
-		put_page(page);
-		if (ret)
-			return ERR_PTR(ret);
-		goto retry;
-	}
-
 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
 	if (unlikely(!try_grab_page(page, flags))) {
 		page = ERR_PTR(-ENOMEM);
@@ -672,7 +660,7 @@ retry_locked:
 		spin_unlock(ptl);
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+	if (flags & FOLL_SPLIT_PMD) {
 		int ret;
 		page = pmd_page(*pmd);
 		if (is_huge_zero_page(page)) {
@@ -681,19 +669,7 @@ retry_locked:
 			split_huge_pmd(vma, pmd, address);
 			if (pmd_trans_unstable(pmd))
 				ret = -EBUSY;
-		} else if (flags & FOLL_SPLIT) {
-			if (unlikely(!try_get_page(page))) {
-				spin_unlock(ptl);
-				return ERR_PTR(-ENOMEM);
-			}
-			spin_unlock(ptl);
-			lock_page(page);
-			ret = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
-			if (pmd_none(*pmd))
-				return no_page_table(vma, flags);
-		} else {	/* flags & FOLL_SPLIT_PMD */
+		} else {
 			spin_unlock(ptl);
 			split_huge_pmd(vma, pmd, address);
 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
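For reference, the removed FOLL_SPLIT path split the compound page itself,
whereas the FOLL_SPLIT_PMD path that remains only splits the page-table
mapping and leaves the THP intact.  Condensed from the hunks above, with
locking and error handling elided:

	/* FOLL_SPLIT (removed): break the THP into base pages. */
	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);

	/* FOLL_SPLIT_PMD (kept): remap with PTEs; the page stays compound. */
	split_huge_pmd(vma, pmd, address);
	ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;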