Merge 4.14.239 into android-4.14-stable

Changes in 4.14.239
	include/linux/mmdebug.h: make VM_WARN* non-rvals
	mm: add VM_WARN_ON_ONCE_PAGE() macro
	mm/rmap: remove unneeded semicolon in page_not_mapped()
	mm/rmap: use page_not_mapped in try_to_unmap()
	mm/thp: try_to_unmap() use TTU_SYNC for safe splitting
	mm/thp: fix vma_address() if virtual address below file offset
	mm/thp: fix page_address_in_vma() on file THP tails
	mm: thp: replace DEBUG_VM BUG with VM_WARN when unmap fails for split
	mm: page_vma_mapped_walk(): use page for pvmw->page
	mm: page_vma_mapped_walk(): settle PageHuge on entry
	mm: page_vma_mapped_walk(): use pmde for *pvmw->pmd
	mm: page_vma_mapped_walk(): prettify PVMW_MIGRATION block
	mm: page_vma_mapped_walk(): crossing page table boundary
	mm: page_vma_mapped_walk(): add a level of indentation
	mm: page_vma_mapped_walk(): use goto instead of while (1)
	mm: page_vma_mapped_walk(): get vma_address_end() earlier
	mm/thp: fix page_vma_mapped_walk() if THP mapped by ptes
	mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk()
	mm, futex: fix shared futex pgoff on shmem huge page
	scsi: sr: Return appropriate error code when disk is ejected
	drm/nouveau: fix dma_address check for CPU/GPU sync
	kfifo: DECLARE_KFIFO_PTR(fifo, u64) does not work on arm 32 bit
	kthread_worker: split code for canceling the delayed work timer
	kthread: prevent deadlock when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync()
	xen/events: reset active flag for lateeoi events later
	Linux 4.14.239

Signed-off-by: Greg Kroah-Hartman <[email protected]>
Change-Id: I8bf22702e61b998602c9c195fec19a2b42f89e1b
gregkh committed Jul 11, 2021
2 parents 954b37d + 4e68c9b commit 3220829
Showing 16 changed files with 280 additions and 178 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 238
SUBLEVEL = 239
EXTRAVERSION =
NAME = Petit Gorille

4 changes: 2 additions & 2 deletions drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -450,7 +450,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;

if (!ttm_dma)
if (!ttm_dma || !ttm_dma->dma_address)
return;

/* Don't waste time looping if the object is coherent */
@@ -470,7 +470,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;

if (!ttm_dma)
if (!ttm_dma || !ttm_dma->dma_address)
return;

/* Don't waste time looping if the object is coherent */
2 changes: 2 additions & 0 deletions drivers/scsi/sr.c
@@ -216,6 +216,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
return DISK_EVENT_EJECT_REQUEST;
else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE;
else if (med->media_event_code == 3)
return DISK_EVENT_EJECT_REQUEST;
return 0;
}

23 changes: 19 additions & 4 deletions drivers/xen/events/events_base.c
@@ -524,6 +524,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
}

info->eoi_time = 0;

/* is_active hasn't been reset yet, do it now. */
smp_store_release(&info->is_active, 0);
do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

@@ -1780,10 +1783,22 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn)) {
do_mask(info, EVT_MASK_REASON_EOI_PENDING);
ack_dynirq(data);
}
if (!VALID_EVTCHN(evtchn))
return;

do_mask(info, EVT_MASK_REASON_EOI_PENDING);

if (unlikely(irqd_is_setaffinity_pending(data)) &&
likely(!irqd_irq_disabled(data))) {
do_mask(info, EVT_MASK_REASON_TEMPORARY);

clear_evtchn(evtchn);

irq_move_masked_irq(data);

do_unmask(info, EVT_MASK_REASON_TEMPORARY);
} else
clear_evtchn(evtchn);
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
16 changes: 0 additions & 16 deletions include/linux/hugetlb.h
@@ -467,17 +467,6 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
if (!PageCompound(page))
return page->index;

return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
@@ -572,11 +561,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
3 changes: 2 additions & 1 deletion include/linux/kfifo.h
@@ -113,7 +113,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
* array is a part of the structure and the fifo type where the array is
* outside of the fifo structure.
*/
#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo))
#define __is_kfifo_ptr(fifo) \
(sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))

/**
* DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
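
The old check compared sizeof(*fifo) with sizeof(struct __kfifo); with u64 elements on 32-bit arm the zero-length u64 buf[] forces 8-byte alignment and pads the declared object, so the sizes differ and kfifo_alloc() failed on such fifos. Comparing against a STRUCT_KFIFO_PTR() of the same element type keeps the padding on both sides. For illustration, the usage pattern this re-enables (the sample_* names are invented; the kfifo calls are the normal API):

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* A driver-private fifo of u64 samples, allocated at runtime. */
static DECLARE_KFIFO_PTR(sample_fifo, u64);

static int sample_fifo_init(void)
{
        /*
         * kfifo_alloc() only accepts the "pointer" flavour of kfifo;
         * __is_kfifo_ptr() is what tells the two flavours apart, so the
         * old sizeof(struct __kfifo) comparison made this call fail for
         * u64 elements on 32-bit arm.
         */
        return kfifo_alloc(&sample_fifo, 64, GFP_KERNEL);
}

static void sample_fifo_push(u64 value)
{
        if (!kfifo_put(&sample_fifo, value))
                pr_warn("sample fifo full, dropping value\n");
}

static void sample_fifo_exit(void)
{
        kfifo_free(&sample_fifo);
}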
21 changes: 17 additions & 4 deletions include/linux/mmdebug.h
@@ -37,17 +37,30 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
#define VM_WARN_ON(cond) WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
#define VM_WARN(cond, format...) WARN(cond, format)
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})

#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
#define VM_WARN(cond, format...) (void)WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
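
Two things change here: the existing VM_WARN* macros become plain statements (the (void) casts on the CONFIG_DEBUG_VM side and BUILD_BUG_ON_INVALID() on the other both yield void), and the new VM_WARN_ON_ONCE_PAGE() warns once and dumps the offending page. A sketch of the intended call pattern, modelled on the unmap_page() hunk further down (only the wrapper function name is invented):

#include <linux/mm.h>
#include <linux/mmdebug.h>

static void assert_fully_unmapped(struct page *page)
{
        /*
         * Warns (and dumps the page) at most once per boot if the page is
         * still mapped here.  With CONFIG_DEBUG_VM=n the check compiles
         * away and the condition is only type-checked.
         */
        VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);

        /*
         * VM_WARN_ON_ONCE() is now a statement rather than a value, so a
         * construct like
         *      if (VM_WARN_ON_ONCE(page_mapped(page)))
         *              return;
         * no longer builds in either configuration.
         */
}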
13 changes: 7 additions & 6 deletions include/linux/pagemap.h
@@ -408,7 +408,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}

/*
* Get index of the page with in radix-tree
* Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
@@ -427,15 +427,16 @@ static inline pgoff_t page_to_index(struct page *page)
return pgoff;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
* Get the offset in PAGE_SIZE.
* (TODO: hugepage should have ->index in PAGE_SIZE)
* Get the offset in PAGE_SIZE (even for hugetlb pages).
* (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
if (unlikely(PageHeadHuge(page)))
return page->index << compound_order(page);

if (unlikely(PageHuge(page)))
return hugetlb_basepage_index(page);
return page_to_index(page);
}
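
This matters for the futex change below: page_to_pgoff() returns an offset in PAGE_SIZE units for every kind of page, hugetlb included (via hugetlb_basepage_index()), whereas the removed basepage_index() only special-cased hugetlb and relied on ->index being usable for other compound pages. Illustrative only (the helper name is invented):

#include <linux/pagemap.h>

static pgoff_t shared_key_pgoff(struct page *tail)
{
        /*
         * Correct for regular pages, THP tails and hugetlb tails alike:
         * the result is always an offset in PAGE_SIZE units.
         */
        return page_to_pgoff(tail);
}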

3 changes: 2 additions & 1 deletion include/linux/rmap.h
@@ -98,7 +98,8 @@ enum ttu_flags {
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
TTU_SYNC = 0x200, /* avoid racy checks with PVMW_SYNC */
};

#ifdef CONFIG_MMU
2 changes: 1 addition & 1 deletion kernel/futex.c
@@ -719,7 +719,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)

key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
key->shared.pgoff = basepage_index(tail);
key->shared.pgoff = page_to_pgoff(tail);
rcu_read_unlock();
}

77 changes: 51 additions & 26 deletions kernel/kthread.c
@@ -979,37 +979,47 @@ void kthread_flush_work(struct kthread_work *work)
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
* This function removes the work from the worker queue. Also it makes sure
* that it won't get queued later via the delayed work's timer.
* Make sure that the timer is neither set nor running and could
* not manipulate the work list_head any longer.
*
* The function is called under worker->lock. The lock is temporarily
* released, but the timer can't be set again in the meantime.
*/
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
unsigned long *flags)
{
struct kthread_delayed_work *dwork =
container_of(work, struct kthread_delayed_work, work);
struct kthread_worker *worker = work->worker;

/*
* del_timer_sync() must be called to make sure that the timer
* callback is not running. The lock must be temporarily released
* to avoid a deadlock with the callback. In the meantime,
* any queuing is blocked by setting the canceling counter.
*/
work->canceling++;
spin_unlock_irqrestore(&worker->lock, *flags);
del_timer_sync(&dwork->timer);
spin_lock_irqsave(&worker->lock, *flags);
work->canceling--;
}

/*
* This function removes the work from the worker queue.
*
* It is called under worker->lock. The caller must make sure that
* the timer used by delayed work is not running, e.g. by calling
* kthread_cancel_delayed_work_timer().
*
* The work might still be in use when this function finishes. See the
* current_work being processed by the worker.
*
* Return: %true if @work was pending and successfully canceled,
* %false if @work was not pending
*/
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
unsigned long *flags)
static bool __kthread_cancel_work(struct kthread_work *work)
{
/* Try to cancel the timer if exists. */
if (is_dwork) {
struct kthread_delayed_work *dwork =
container_of(work, struct kthread_delayed_work, work);
struct kthread_worker *worker = work->worker;

/*
* del_timer_sync() must be called to make sure that the timer
* callback is not running. The lock must be temporary released
* to avoid a deadlock with the callback. In the meantime,
* any queuing is blocked by setting the canceling counter.
*/
work->canceling++;
spin_unlock_irqrestore(&worker->lock, *flags);
del_timer_sync(&dwork->timer);
spin_lock_irqsave(&worker->lock, *flags);
work->canceling--;
}

/*
* Try to remove the work from a worker list. It might either
* be from worker->work_list or from worker->delayed_work_list.
@@ -1062,11 +1072,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);

/* Do not fight with another command that is canceling this work. */
/*
* Temporarily cancel the work, but do not fight with another command
* that is canceling the work as well.
*
* It is a bit tricky because of possible races with another
* mod_delayed_work() and cancel_delayed_work() callers.
*
* The timer must be canceled first because worker->lock is released
* when doing so. But the work can be removed from the queue (list)
* only when it can be queued again so that the return value can
* be used for reference counting.
*/
kthread_cancel_delayed_work_timer(work, &flags);
if (work->canceling)
goto out;
ret = __kthread_cancel_work(work);

ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
@@ -1088,7 +1110,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);

ret = __kthread_cancel_work(work, is_dwork, &flags);
if (is_dwork)
kthread_cancel_delayed_work_timer(work, &flags);

ret = __kthread_cancel_work(work);

if (worker->current_work != work)
goto out_fast;
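
The split matters because kthread_cancel_delayed_work_timer() has to drop worker->lock around del_timer_sync(); kthread_mod_delayed_work() can then re-check work->canceling once the lock has been re-taken and back off when another caller is already canceling the work, which is what the deadlock fix relies on. Nothing changes for callers; a minimal sketch of the two racing paths involved (all names except the kthread_* API are invented):

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *stats_worker;
static struct kthread_delayed_work stats_dwork;

static void stats_work_fn(struct kthread_work *work)
{
        /* ... gather statistics, possibly rearming itself ... */
}

static int stats_init(void)
{
        stats_worker = kthread_create_worker(0, "stats");
        if (IS_ERR(stats_worker))
                return PTR_ERR(stats_worker);

        kthread_init_delayed_work(&stats_dwork, stats_work_fn);
        kthread_queue_delayed_work(stats_worker, &stats_dwork, HZ);
        return 0;
}

/* Path A: reschedule the work; may run concurrently with path B. */
static void stats_kick(void)
{
        kthread_mod_delayed_work(stats_worker, &stats_dwork, HZ / 2);
}

/* Path B: tear down; the kthread_cancel_delayed_work_sync() side of
 * the race fixed above. */
static void stats_exit(void)
{
        kthread_cancel_delayed_work_sync(&stats_dwork);
        kthread_destroy_worker(stats_worker);
}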
26 changes: 8 additions & 18 deletions mm/huge_memory.c
@@ -2324,16 +2324,16 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
bool unmap_success;
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC;

VM_BUG_ON_PAGE(!PageHead(page), page);

if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;

unmap_success = try_to_unmap(page, ttu_flags);
VM_BUG_ON_PAGE(!unmap_success, page);
try_to_unmap(page, ttu_flags);

VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}

static void remap_page(struct page *page)
@@ -2587,7 +2587,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int count, mapcount, extra_pins, ret;
int extra_pins, ret;
bool mlocked;
unsigned long flags;
pgoff_t end;
@@ -2649,7 +2649,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

mlocked = PageMlocked(page);
unmap_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);

/* Make sure the page is not on per-CPU pagevec as it takes pin */
if (mlocked)
@@ -2675,9 +2674,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&pgdata->split_queue_lock);
count = page_count(head);
mapcount = total_mapcount(head);
if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
pgdata->split_queue_len--;
list_del(page_deferred_list(head));
@@ -2693,16 +2690,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
} else
ret = 0;
} else {
if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
pr_alert("total_mapcount: %u, page_count(): %u\n",
mapcount, count);
if (PageTail(page))
dump_page(head, NULL);
dump_page(page, "total_mapcount(head) > 0");
BUG();
}
spin_unlock(&pgdata->split_queue_lock);
fail: if (mapping)
fail:
if (mapping)
spin_unlock(&mapping->tree_lock);
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
remap_page(head);
5 changes: 1 addition & 4 deletions mm/hugetlb.c
@@ -1403,15 +1403,12 @@ int PageHeadHuge(struct page *page_head)
return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
pgoff_t hugetlb_basepage_index(struct page *page)
{
struct page *page_head = compound_head(page);
pgoff_t index = page_index(page_head);
unsigned long compound_idx;

if (!PageHuge(page_head))
return page_index(page);

if (compound_order(page_head) >= MAX_ORDER)
compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
else