Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "21 patches.

  Subsystems affected by this patch series: MAINTAINERS, mailmap, and mm
  (mlock, pagecache, damon, slub, memcg, hugetlb, and pagecache)"

* emailed patches from Andrew Morton <[email protected]>: (21 commits)
  mm: bdi: initialize bdi_min_ratio when bdi is unregistered
  hugetlbfs: fix issue of preallocation of gigantic pages can't work
  mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
  mm/slub: fix endianness bug for alloc/free_traces attributes
  selftests/damon: split test cases
  selftests/damon: test debugfs file reads/writes with huge count
  selftests/damon: test wrong DAMOS condition ranges input
  selftests/damon: test DAMON enabling with empty target_ids case
  selftests/damon: skip test if DAMON is running
  mm/damon/vaddr-test: remove unnecessary variables
  mm/damon/vaddr-test: split a test function having >1024 bytes frame size
  mm/damon/vaddr: remove an unnecessary warning message
  mm/damon/core: remove unnecessary error messages
  mm/damon/dbgfs: remove an unnecessary error message
  mm/damon/core: use better timer mechanisms selection threshold
  mm/damon/core: fix fake load reports due to uninterruptible sleeps
  timers: implement usleep_idle_range()
  filemap: remove PageHWPoison check from next_uptodate_page()
  mailmap: update email address for Guo Ren
  MAINTAINERS: update kdump maintainers
  ...
torvalds committed Dec 11, 2021
2 parents (6f51352 + 3c376df), commit df442a4
Showing 23 changed files with 322 additions and 207 deletions.
2 changes: 2 additions & 0 deletions .mailmap
@@ -126,6 +126,8 @@ Greg Kroah-Hartman <[email protected]>
 Greg Kroah-Hartman <[email protected]>
 Greg Kurz <[email protected]> <[email protected]>
 Gregory CLEMENT <[email protected]> <[email protected]>
+Guo Ren <[email protected]> <[email protected]>
+Guo Ren <[email protected]> <[email protected]>
 Gustavo Padovan <[email protected]>
 Gustavo Padovan <[email protected]>
 Hanjun Guo <[email protected]> <[email protected]>
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -10279,9 +10279,9 @@ F:      lib/Kconfig.kcsan
 F:      scripts/Makefile.kcsan

 KDUMP
-M:      Dave Young <[email protected]>
 M:      Baoquan He <[email protected]>
 R:      Vivek Goyal <[email protected]>
+R:      Dave Young <[email protected]>
 L:      [email protected]
 S:      Maintained
 W:      http://lse.sourceforge.net/kdump/
14 changes: 13 additions & 1 deletion include/linux/delay.h
@@ -20,6 +20,7 @@
  */

 #include <linux/math.h>
+#include <linux/sched.h>

 extern unsigned long loops_per_jiffy;

@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+                        unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+        usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+        usleep_range_state(min, max, TASK_IDLE);
+}

 static inline void ssleep(unsigned int seconds)
 {
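
Note on the new helper: TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD, so a usleep_idle_range() sleep still cannot be interrupted by signals but is not counted toward the load average. A minimal sketch of the intended use, assuming a hypothetical polling kthread (the function name and 5 ms interval are illustrative, not part of this series):

/* requires <linux/delay.h> and <linux/kthread.h> */
static int sampler_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* ... take one measurement ... */

                /*
                 * Sleep roughly 5 ms in TASK_IDLE: behaves like
                 * usleep_range(), but is excluded from loadavg.
                 */
                usleep_idle_range(5000, 5100);
        }
        return 0;
}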
13 changes: 10 additions & 3 deletions include/uapi/linux/resource.h
@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM        (8*1024*1024)

 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
  */
-#define MLOCK_LIMIT     ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT     (8*1024*1024)

 /*
  * Due to binary compatibility, the actual resource numbers
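
Context: MLOCK_LIMIT seeds the default RLIMIT_MEMLOCK, so unprivileged processes can now mlock() up to 8 MiB out of the box instead of 64 KiB. A small userspace sketch to inspect the effective limit (the 1 MiB buffer size is just an example):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        size_t len = 1 << 20;   /* 1 MiB, well under the new 8 MiB default */
        void *buf = malloc(len);

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                printf("RLIMIT_MEMLOCK soft=%llu hard=%llu\n",
                       (unsigned long long)rl.rlim_cur,
                       (unsigned long long)rl.rlim_max);

        /* mlock() fails with ENOMEM once a process exceeds the limit */
        if (buf && mlock(buf, len) == 0) {
                puts("mlock succeeded");
                munlock(buf, len);
        }
        free(buf);
        return 0;
}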
16 changes: 9 additions & 7 deletions kernel/time/timer.c
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
 EXPORT_SYMBOL(msleep_interruptible);

 /**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min:        Minimum time in usecs to sleep
+ * @max:        Maximum time in usecs to sleep
+ * @state:      State of the current task that will be while sleeping
  *
  * In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay(). The sleep improves responsiveness
+ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
  * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
  * power usage by allowing hrtimers to take advantage of an already-
  * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+                                unsigned int state)
 {
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;

        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(state);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
 }
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
7 changes: 7 additions & 0 deletions mm/backing-dev.c
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

+       /*
+        * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+        * update the global bdi_min_ratio.
+        */
+       if (bdi->min_ratio)
+               bdi_set_min_ratio(bdi, 0);
+
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
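
The bug being fixed: bdi_set_min_ratio() maintains the global bdi_min_ratio as the sum of every registered device's minimum ratio, but bdi_unregister() never returned a device's share, so register/set/unregister cycles leaked global budget until further min_ratio writes started failing. A standalone sketch of the delta-based accounting this hunk relies on (stub names, not the kernel's types; the real code also takes bdi_lock and checks max_ratio):

/* global sum over all registered BDIs, capped below 100 */
static unsigned int bdi_min_ratio_sum;

struct bdi_stub {
        unsigned int min_ratio;
};

static int stub_set_min_ratio(struct bdi_stub *bdi, unsigned int min_ratio)
{
        /*
         * Work with the delta so the global stays the sum over all
         * devices; the unsigned wraparound cancels out when lowering.
         */
        min_ratio -= bdi->min_ratio;
        if (bdi_min_ratio_sum + min_ratio >= 100)
                return -1;      /* the kernel returns -EINVAL here */
        bdi_min_ratio_sum += min_ratio;
        bdi->min_ratio += min_ratio;
        return 0;
}

/* what the fix adds on unregister: give this BDI's share back */
static void stub_unregister(struct bdi_stub *bdi)
{
        if (bdi->min_ratio)
                stub_set_min_ratio(bdi, 0);
}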
20 changes: 7 additions & 13 deletions mm/damon/core.c
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
        for (i = 0; i < nr_ids; i++) {
                t = damon_new_target(ids[i]);
                if (!t) {
-                       pr_err("Failed to alloc damon_target\n");
                        /* The caller should do cleanup of the ids itself */
                        damon_for_each_target_safe(t, next, ctx)
                                damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                unsigned long aggr_int, unsigned long primitive_upd_int,
                unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-       if (min_nr_reg < 3) {
-               pr_err("min_nr_regions (%lu) must be at least 3\n",
-                               min_nr_reg);
+       if (min_nr_reg < 3)
                return -EINVAL;
-       }
-       if (min_nr_reg > max_nr_reg) {
-               pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
-                               min_nr_reg, max_nr_reg);
+       if (min_nr_reg > max_nr_reg)
                return -EINVAL;
-       }

        ctx->sample_interval = sample_int;
        ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)

 static void kdamond_usleep(unsigned long usecs)
 {
-       if (usecs > 100 * 1000)
-               schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+       /* See Documentation/timers/timers-howto.rst for the thresholds */
+       if (usecs > 20 * USEC_PER_MSEC)
+               schedule_timeout_idle(usecs_to_jiffies(usecs));
        else
-               usleep_range(usecs, usecs + 1);
+               usleep_idle_range(usecs, usecs + 1);
 }

 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
                                ctx->callback.after_sampling(ctx))
                        done = true;

-               usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+               kdamond_usleep(ctx->sample_interval);

                if (ctx->primitive.check_accesses)
                        max_nr_accesses = ctx->primitive.check_accesses(ctx);
4 changes: 1 addition & 3 deletions mm/damon/dbgfs.c
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                                &wmarks.low, &parsed);
                if (ret != 18)
                        break;
-               if (!damos_action_valid(action)) {
-                       pr_err("wrong action %d\n", action);
+               if (!damos_action_valid(action))
                        goto fail;
-               }

                pos += parsed;
                scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
79 changes: 37 additions & 42 deletions mm/damon/vaddr-test.h
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                struct damon_addr_range *three_regions,
                unsigned long *expected, int nr_expected)
 {
-       struct damon_ctx *ctx = damon_new_ctx();
        struct damon_target *t;
        struct damon_region *r;
        int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
                damon_add_region(r, t);
        }
-       damon_add_target(ctx, t);

        damon_va_apply_three_regions(t, three_regions);

@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
        }
-
-       damon_destroy_ctx(ctx);
 }

 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
                        new_three_regions, expected, ARRAY_SIZE(expected));
 }

-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+               unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-       struct damon_ctx *c = damon_new_ctx();
-       struct damon_target *t;
-       struct damon_region *r;
-       unsigned long i;
-
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-                       -EINVAL);
-
-       t = damon_new_target(42);
-       r = damon_new_region(0, 100);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);

        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);

-       i = 0;
        damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-               KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+               KUNIT_EXPECT_EQ(test, r->ar.start, start);
+               KUNIT_EXPECT_EQ(test, r->ar.end, end);
        }
+
        damon_free_target(t);
+}

+static void damon_test_split_evenly_succ(struct kunit *test,
+               unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
+       unsigned long expected_width = (end - start) / nr_pieces;
+       unsigned long i = 0;
+
-       t = damon_new_target(42);
-       r = damon_new_region(5, 59);
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), 0);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);

-       i = 0;
        damon_for_each_region(r, t) {
-               if (i == 4)
+               if (i == nr_pieces - 1)
                        break;
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+               KUNIT_EXPECT_EQ(test,
+                               r->ar.start, start + i++ * expected_width);
+               KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
        }
-       KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-       KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+       KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+       KUNIT_EXPECT_EQ(test, r->ar.end, end);
        damon_free_target(t);
+}

-       t = damon_new_target(42);
-       r = damon_new_region(5, 6);
-       damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+                       -EINVAL);

-       damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-       }
-       damon_free_target(t);
-       damon_destroy_ctx(c);
+       damon_test_split_evenly_fail(test, 0, 100, 0);
+       damon_test_split_evenly_succ(test, 0, 100, 10);
+       damon_test_split_evenly_succ(test, 5, 59, 5);
+       damon_test_split_evenly_fail(test, 5, 6, 2);
 }

 static struct kunit_case damon_test_cases[] = {
1 change: 0 additions & 1 deletion mm/damon/vaddr.c
@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
        case DAMOS_STAT:
                return 0;
        default:
-               pr_warn("Wrong action %d\n", scheme->action);
                return -EINVAL;
        }

2 changes: 0 additions & 2 deletions mm/filemap.c
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
                        goto skip;
                if (!PageUptodate(page) || PageReadahead(page))
                        goto skip;
-               if (PageHWPoison(page))
-                       goto skip;
                if (!trylock_page(page))
                        goto skip;
                if (page->mapping != mapping)
2 changes: 1 addition & 1 deletion mm/hugetlb.c
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
        struct huge_bootmem_page *m = NULL; /* initialize for clang */
        int nr_nodes, node;

-       if (nid >= nr_online_nodes)
+       if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                return 0;
        /* do node specific alloc */
        if (nid != NUMA_NO_NODE) {
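
Why one comparison fixes gigantic-page preallocation: with hugepages= and no node specified, nid arrives as NUMA_NO_NODE (-1); nr_online_nodes is an unsigned int, so the old comparison promoted -1 to UINT_MAX, the range check always fired, and the function returned before allocating anything. A userspace-compilable demo of the promotion (values are illustrative; it assumes nr_online_nodes is unsigned, as in the kernel's nodemask.h):

#include <stdio.h>

#define NUMA_NO_NODE    (-1)

int main(void)
{
        unsigned int nr_online_nodes = 2;       /* pretend two nodes online */
        int nid = NUMA_NO_NODE;

        /* old check: -1 is promoted to UINT_MAX, so this is true */
        if (nid >= nr_online_nodes)
                printf("old check rejects nid=%d (the bug)\n", nid);

        /* fixed check: screen out NUMA_NO_NODE before the range test */
        if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                printf("new check rejects nid=%d\n", nid);
        else
                printf("new check accepts nid=%d\n", nid);

        return 0;
}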
[11 more changed files not shown in this view]