Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

We got slightly different patches removing a double word
in a comment in net/ipv4/raw.c - picked the version from net.

Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached
values instead of VNIC login response buffer (following what
commit 507ebe6444 ("ibmvnic: Fix use-after-free of VNIC login
response buffer") did).

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski
2020-09-04 21:18:58 -07:00
1669 changed files with 8287 additions and 5744 deletions

mm/gup.c

@@ -381,22 +381,13 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
 }
 
 /*
- * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
  */
 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 {
-	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
-}
-
-/*
- * A (separate) COW fault might break the page the other way and
- * get_user_pages() would return the page from what is now the wrong
- * VM. So we need to force a COW break at GUP time even for reads.
- */
-static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
-{
-	return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -843,7 +834,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 			goto unmap;
 		*page = pte_page(*pte);
 	}
-	if (unlikely(!try_get_page(*page))) {
+	if (unlikely(!try_grab_page(*page, gup_flags))) {
 		ret = -ENOMEM;
 		goto unmap;
 	}
@@ -1067,11 +1058,9 @@ static long __get_user_pages(struct mm_struct *mm,
 				goto out;
 			}
 			if (is_vm_hugetlb_page(vma)) {
-				if (should_force_cow_break(vma, foll_flags))
-					foll_flags |= FOLL_WRITE;
 				i = follow_hugetlb_page(mm, vma, pages, vmas,
 						&start, &nr_pages, i,
-						foll_flags, locked);
+						gup_flags, locked);
 				if (locked && *locked == 0) {
 					/*
 					 * We've got a VM_FAULT_RETRY
@@ -1085,10 +1074,6 @@ static long __get_user_pages(struct mm_struct *mm,
 				continue;
 			}
 		}
-
-		if (should_force_cow_break(vma, foll_flags))
-			foll_flags |= FOLL_WRITE;
-
 retry:
 		/*
 		 * If we have a pending SIGKILL, don't keep faulting pages and
@@ -2689,19 +2674,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 		return -EFAULT;
 
 	/*
-	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
-	 * because get_user_pages() may need to cause an early COW in
-	 * order to avoid confusing the normal COW routines. So only
-	 * targets that are already writable are safe to do by just
-	 * looking at the page tables.
-	 *
-	 * NOTE! With FOLL_FAST_ONLY we allow read-only gup_fast() here,
-	 * because there is no slow path to fall back on. But you'd
-	 * better be careful about possible COW pages - you'll get _a_
-	 * COW page, but not necessarily the one you intended to get
-	 * depending on what COW event happens after this. COW may break
-	 * the page copy in a random direction.
-	 *
 	 * Disable interrupts. The nested form is used, in order to allow
 	 * full, general purpose use of this routine.
 	 *
@@ -2714,8 +2686,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 	 */
 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) {
 		unsigned long fast_flags = gup_flags;
-		if (!(gup_flags & FOLL_FAST_ONLY))
-			fast_flags |= FOLL_WRITE;
 
 		local_irq_save(flags);
 		gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned);

mm/huge_memory.c

@@ -1291,12 +1291,13 @@ fallback:
 }
 
 /*
- * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+	return pmd_write(pmd) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,

mm/ksm.c

@@ -2453,6 +2453,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		if (vma_is_dax(vma))
 			return 0;
 
+#ifdef VM_SAO
+		if (*vm_flags & VM_SAO)
+			return 0;
+#endif
 #ifdef VM_SPARC_ADI
 		if (*vm_flags & VM_SPARC_ADI)
 			return 0;
@@ -2657,31 +2661,6 @@ again:
 		goto again;
 }
 
-bool reuse_ksm_page(struct page *page,
-		    struct vm_area_struct *vma,
-		    unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VM
-	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
-			WARN_ON(!page_mapped(page)) ||
-			WARN_ON(!PageLocked(page))) {
-		dump_page(page, "reuse_ksm_page");
-		return false;
-	}
-#endif
-
-	if (PageSwapCache(page) || !page_stable_node(page))
-		return false;
-	/* Prohibit parallel get_ksm_page() */
-	if (!page_ref_freeze(page, 1))
-		return false;
-
-	page_move_anon_rmap(page, vma);
-	page->index = linear_page_index(vma, address);
-	page_ref_unfreeze(page, 1);
-
-	return true;
-}
 #ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {

mm/memory.c

@@ -2622,6 +2622,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
 		update_mmu_cache(vma, vmf->address, vmf->pte);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	count_vm_event(PGREUSE);
 }
 
 /*
@@ -2927,50 +2928,25 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * not dirty accountable.
 	 */
 	if (PageAnon(vmf->page)) {
-		int total_map_swapcount;
-		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
-					   page_count(vmf->page) != 1))
+		struct page *page = vmf->page;
+
+		/* PageKsm() doesn't necessarily raise the page refcount */
+		if (PageKsm(page) || page_count(page) != 1)
 			goto copy;
-		if (!trylock_page(vmf->page)) {
-			get_page(vmf->page);
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
-			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-				update_mmu_tlb(vma, vmf->address, vmf->pte);
-				unlock_page(vmf->page);
-				pte_unmap_unlock(vmf->pte, vmf->ptl);
-				put_page(vmf->page);
-				return 0;
-			}
-			put_page(vmf->page);
+		if (!trylock_page(page))
+			goto copy;
+		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
+			unlock_page(page);
+			goto copy;
 		}
-		if (PageKsm(vmf->page)) {
-			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
-						     vmf->address);
-			unlock_page(vmf->page);
-			if (!reused)
-				goto copy;
-			wp_page_reuse(vmf);
-			return VM_FAULT_WRITE;
-		}
-		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
-			if (total_map_swapcount == 1) {
-				/*
-				 * The page is all ours. Move it to
-				 * our anon_vma so the rmap code will
-				 * not search our parent or siblings.
-				 * Protected against the rmap code by
-				 * the page lock.
-				 */
-				page_move_anon_rmap(vmf->page, vma);
-			}
-			unlock_page(vmf->page);
-			wp_page_reuse(vmf);
-			return VM_FAULT_WRITE;
-		}
-		unlock_page(vmf->page);
+		/*
+		 * Ok, we've got the only map reference, and the only
+		 * page count reference, and the page is locked,
+		 * it's dark out, and we're wearing sunglasses. Hit it.
+		 */
+		wp_page_reuse(vmf);
+		unlock_page(page);
+		return VM_FAULT_WRITE;
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
 		return wp_page_shared(vmf);

mm/vmstat.c

@@ -1241,6 +1241,7 @@ const char * const vmstat_text[] = {
 	"pglazyfreed",
 
 	"pgrefill",
+	"pgreuse",
 	"pgsteal_kswapd",
 	"pgsteal_direct",
 	"pgscan_kswapd",