From cf7797ea60e3e721e3ae5090edbc2ec72d715436 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 29 Aug 2020 12:26:01 +0100 Subject: [PATCH 01/72] HID: core: Correctly handle ReportSize being zero commit bce1305c0ece3dc549663605e567655dd701752c upstream. It appears that a ReportSize value of zero is legal, even if a bit non-sensical. Most of the HID code seems to handle that gracefully, except when computing the total size in bytes. When fed as input to memset, this leads to some funky outcomes. Detect the corner case and correctly compute the size. Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b4b9d8152536..d99c9ed5dfe3 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1406,6 +1406,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1418,7 +1429,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1545,7 +1556,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; From ac48d8300edd1aa4ce0fbef0ff5136d363f44cdf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Sep 2020 10:52:33 +0100 Subject: [PATCH 02/72] HID: core: Sanitize event code and type when mapping input commit 35556bed836f8dc07ac55f69c8d17dce3e7f0e25 upstream. When calling into hid_map_usage(), the passed event code is blindly stored as is, even if it doesn't fit in the associated bitmap. This event code can come from a variety of sources, including devices masquerading as input devices, only a bit more "programmable". Instead of taking the event code at face value, check that it actually fits the corresponding bitmap, and if it doesn't: - spit out a warning so that we know which device is acting up - NULLify the bitmap pointer so that we catch unexpected uses Code paths that can make use of untrusted inputs can now check that the mapping was indeed correct and bail out if not. 
Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-input.c | 4 ++++ drivers/hid/hid-multitouch.c | 2 ++ include/linux/hid.h | 42 +++++++++++++++++++++++++----------- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 26e967730997..5e1a51ba6500 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1026,6 +1026,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 1207102823de..258a50ec1572 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -567,6 +567,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, case HID_UP_BUTTON: code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/include/linux/hid.h b/include/linux/hid.h index eda06f7ee84a..981657075f05 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -874,34 +874,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -915,7 +930,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** From d6d8c21e7db3cb9f6c26b76b1e878f96c10cabf8 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 1 Sep 2020 16:58:53 -0500 Subject: [PATCH 03/72] perf record/stat: Explicitly call out event modifiers in the documentation commit e48a73a312ebf19cc3d72aa74985db25c30757c1 upstream. Event modifiers are not mentioned in the perf record or perf stat manpages. Add them to orient new users more effectively by pointing them to the perf list manpage for details. 
Fixes: 2055fdaf8703 ("perf list: Document precise event sampling for AMD IBS") Signed-off-by: Kim Phillips Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Budankov Cc: Ian Rogers Cc: Jin Yao Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tony Jones Cc: stable@vger.kernel.org Link: http://lore.kernel.org/lkml/20200901215853.276234-1-kim.phillips@amd.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman --- tools/perf/Documentation/perf-record.txt | 4 ++++ tools/perf/Documentation/perf-stat.txt | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 92335193dc33..d443ca3abf27 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices//format/*. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index d96ccd4844df..b099ac1de854 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_sources/devices//format/* From ccf676d5103bcb02d83574b636af43fcccb74e80 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 20 Aug 2020 06:19:32 -0700 Subject: [PATCH 04/72] hwmon: (applesmc) check status earlier. [ Upstream commit cecf7560f00a8419396a2ed0f6e5d245ccb4feac ] clang static analysis reports this representative problem applesmc.c:758:10: warning: 1st function call argument is an uninitialized value left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ buffer is filled by the earlier call ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, ... This problem is reported because a goto skips the status check. Other similar problems use data from applesmc_read_key before checking the status. So move the checks to before the use. 
Signed-off-by: Tom Rix Reviewed-by: Henrik Rydberg Link: https://lore.kernel.org/r/20200820131932.10590-1-trix@redhat.com Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/applesmc.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 0af7fd311979..587fc5c686b3 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -758,15 +758,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -814,12 +817,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -854,12 +856,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -875,10 +876,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -954,13 +956,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, From 20696cd1723693124979f8cdcb6ed65e30b32e60 Mon Sep 17 00:00:00 2001 From: Amit Engel Date: Wed, 19 Aug 2020 11:31:11 +0300 Subject: [PATCH 05/72] nvmet: Disable keep-alive timer when kato is cleared to 0h [ Upstream commit 0d3b6a8d213a30387b5104b2fb25376d18636f23 ] Based on nvme spec, when keep alive timeout is set to zero the keep-alive timer should be disabled. 
Signed-off-by: Amit Engel Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/nvme/target/core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 96ea6c76be6e..63b87a847276 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -205,6 +205,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -214,6 +217,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); From 7627de7f3379ba988cfafb4cc4ba57265228dff8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Aug 2020 11:00:26 -0400 Subject: [PATCH 06/72] ceph: don't allow setlease on cephfs [ Upstream commit 496ceaf12432b3d136dcdec48424312e71359ea7 ] Leases don't currently work correctly on kcephfs, as they are not broken when caps are revoked. They could eventually be implemented similarly to how we did them in libcephfs, but for now don't allow them. [ idryomov: no need for simple_nosetlease() in ceph_dir_fops and ceph_snapdir_fops ] Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index e7ddb23d9bb7..e818344a052c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1773,6 +1773,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_write = iter_file_splice_write, .unlocked_ioctl = ceph_ioctl, From 73888a8f8ceebddb34460ea82a6e106c1ee83c3e Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Thu, 20 Aug 2020 09:48:23 +0200 Subject: [PATCH 07/72] s390: don't trace preemption in percpu macros [ Upstream commit 1196f12a2c960951d02262af25af0bb1775ebcc2 ] Since commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables") the lockdep code itself uses percpu variables. This leads to recursions because the percpu macros are calling preempt_enable() which might call trace_preempt_on(). 
Signed-off-by: Sven Schnelle Reviewed-by: Vasily Gorbik Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/include/asm/percpu.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 90240dfef76a..5889c1ed84c4 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -28,7 +28,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -36,7 +36,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -67,7 +67,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -83,7 +83,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -94,14 +94,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -113,14 +113,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -135,10 +135,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -151,10 +151,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -170,11 +170,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) From 921a52280f510d7520719ab81f3f7aefa1aa85f0 Mon Sep 17 00:00:00 2001 From: Simon Leiner Date: Tue, 25 Aug 2020 11:31:52 +0200 Subject: [PATCH 08/72] xen/xenbus: Fix granting of vmalloc'd memory [ Upstream commit d742db70033c745e410523e00522ee0cfe2aa416 ] On some architectures (like ARM), virt_to_gfn cannot be used for vmalloc'd memory because of its reliance on virt_to_phys. This patch introduces a check for vmalloc'd addresses and obtains the PFN using vmalloc_to_pfn in that case. 
Signed-off-by: Simon Leiner Reviewed-by: Stefano Stabellini Link: https://lore.kernel.org/r/20200825093153.35500-1-simon@leiner.me Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- drivers/xen/xenbus/xenbus_client.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index df27cefb2fa3..266f446ba331 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -384,8 +384,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); From 942a35dabe83cc3d7ad3b7362435d85226d1dea5 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 6 Aug 2020 13:49:28 +0300 Subject: [PATCH 09/72] dmaengine: of-dma: Fix of_dma_router_xlate's of_dma_xlate handling [ Upstream commit 5b2aa9f918f6837ae943557f8cec02c34fcf80e7 ] of_dma_xlate callback can return ERR_PTR as well NULL in case of failure. If error code is returned (not NULL) then the route should be released and the router should not be registered for the channel. Fixes: 56f13c0d9524c ("dmaengine: of_dma: Support for DMA routers") Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200806104928.25975-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/of-dma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index faae0bfe1109..757cf48c1c5e 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* From 79d62daab036907f565dc38e74be1233d683cc7f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 22 Jul 2020 20:36:43 +0200 Subject: [PATCH 10/72] batman-adv: Avoid uninitialized chaddr when handling DHCP [ Upstream commit 303216e76dcab6049c9d42390b1032f0649a8206 ] The gateway client code can try to optimize the delivery of DHCP packets to avoid broadcasting them through the whole mesh. But also transmissions to the client can be optimized by looking up the destination via the chaddr of the DHCP packet. But the chaddr is currently only done when chaddr is fully inside the non-paged area of the skbuff. Otherwise it will not be initialized and the unoptimized path should have been taken. But the implementation didn't handle this correctly. It didn't retrieve the correct chaddr but still tried to perform the TT lookup with this uninitialized memory. 
Reported-by: syzbot+ab16e463b903f5a37036@syzkaller.appspotmail.com Fixes: 6c413b1c22a2 ("batman-adv: send every DHCP packet as bat-unicast") Signed-off-by: Sven Eckelmann Acked-by: Antonio Quartulli Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/gateway_client.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 3bd7ed6b6b3e..9727afc030d8 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -673,8 +673,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) From 6f67b9d38ab08c89ef5865ab8c60cecb49fee382 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Tue, 18 Aug 2020 17:46:10 +0300 Subject: [PATCH 11/72] batman-adv: bla: use netif_rx_ni when not in interrupt context [ Upstream commit 279e89b2281af3b1a9f04906e157992c19c9f163 ] batadv_bla_send_claim() gets called from worker thread context through batadv_bla_periodic_work(), thus netif_rx_ni needs to be used in that case. This fixes "NOHZ: local_softirq_pending 08" log messages seen when batman-adv is enabled. Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code") Signed-off-by: Jussi Kivilinna Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 00123064eb26..e545b42ab0b9 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -451,7 +451,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, skb->len + ETH_HLEN); soft_iface->last_rx = jiffies; - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); From a073dabeea79c4df630ede960e79c20b44c20f91 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Mon, 17 Aug 2020 19:57:26 +0800 Subject: [PATCH 12/72] dmaengine: at_hdmac: check return value of of_find_device_by_node() in at_dma_xlate() [ Upstream commit 0cef8e2c5a07d482ec907249dbd6687e8697677f ] The reurn value of of_find_device_by_node() is not checked, thus null pointer dereference will be triggered if of_find_device_by_node() failed. 
Fixes: bbe89c8e3d59 ("at_hdmac: move to generic DMA binding") Signed-off-by: Yu Kuai Link: https://lore.kernel.org/r/20200817115728.1706719-2-yukuai3@huawei.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/at_hdmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index a32cd71f94bb..cb72b8c915c7 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1810,6 +1810,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); From 334dc5f712518f7ca791209569cb85bfa3b899e4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 19 Aug 2020 11:26:44 -0700 Subject: [PATCH 13/72] MIPS: mm: BMIPS5000 has inclusive physical caches [ Upstream commit dbfc95f98f0158958d1f1e6bf06d74be38dbd821 ] When the BMIPS generic cpu-feature-overrides.h file was introduced, cpu_has_inclusive_caches/MIPS_CPU_INCLUSIVE_CACHES was not set for BMIPS5000 CPUs. Correct this when we have initialized the MIPS secondary cache successfully. Fixes: f337967d6d87 ("MIPS: BMIPS: Add cpu-feature-overrides.h") Signed-off-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/mm/c-r4k.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 0ff379f0cc4a..cb877f86f5fc 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1746,7 +1746,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); From 39f2dc7b03fe3d79d0df78503e95582d27a047d8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 19 Aug 2020 11:26:45 -0700 Subject: [PATCH 14/72] MIPS: BMIPS: Also call bmips_cpu_setup() for secondary cores [ Upstream commit e14f633b66902615cf7faa5d032b45ab8b6fb158 ] The initialization done by bmips_cpu_setup() typically affects both threads of a given core, on 7435 which supports 2 cores and 2 threads, logical CPU number 2 and 3 would not run this initialization. Fixes: 738a3f79027b ("MIPS: BMIPS: Add early CPU initialization code") Signed-off-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/kernel/smp-bmips.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 416d53f587e7..6e3671752775 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -236,6 +236,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: From 76a2878f460e1544ffeebf9af655234ed38ea599 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Aug 2020 14:12:54 +0200 Subject: [PATCH 15/72] netfilter: nf_tables: add NFTA_SET_USERDATA if not null [ Upstream commit 6f03bf43ee05b31d3822def2a80f11b3591c55b3 ] Kernel sends an empty NFTA_SET_USERDATA attribute with no value if userspace adds a set with no NFTA_SET_USERDATA attribute. 
Fixes: e6d8ecac9e68 ("netfilter: nf_tables: Add new attributes into nft_set to store user data.") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- net/netfilter/nf_tables_api.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2fa1c4f2e94e..ec460aedfc61 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2592,7 +2592,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; desc = nla_nest_start(skb, NFTA_SET_DESC); From 0cd49365e148e25ef38ea8a6d1a41a72cac12ea0 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Aug 2020 14:12:55 +0200 Subject: [PATCH 16/72] netfilter: nf_tables: incorrect enum nft_list_attributes definition [ Upstream commit da9125df854ea48a6240c66e8a67be06e2c12c03 ] This should be NFTA_LIST_UNSPEC instead of NFTA_LIST_UNPEC, all other similar attribute definitions are postfixed with _UNSPEC. Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/uapi/linux/netfilter/nf_tables.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index c6c4477c136b..d121c22bf928 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -114,7 +114,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; From f08569eb18b5c402ff0e03b92e5ed6d5e64f7622 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Aug 2020 21:05:50 +0200 Subject: [PATCH 17/72] netfilter: nf_tables: fix destination register zeroing [ Upstream commit 1e105e6afa6c3d32bfb52c00ffa393894a525c27 ] Following bug was reported via irc: nft list ruleset set knock_candidates_ipv4 { type ipv4_addr . inet_service size 65535 elements = { 127.0.0.1 . 123, 127.0.0.1 . 123 } } .. udp dport 123 add @knock_candidates_ipv4 { ip saddr . 123 } udp dport 123 add @knock_candidates_ipv4 { ip saddr . udp dport } It should not have been possible to add a duplicate set entry. After some debugging it turned out that the problem is the immediate value (123) in the second-to-last rule. Concatenations use 32bit registers, i.e. the elements are 8 bytes each, not 6 and it turns out the kernel inserted inet firewall @knock_candidates_ipv4 element 0100007f ffff7b00 : 0 [end] element 0100007f 00007b00 : 0 [end] Note the non-zero upper bits of the first element. It turns out that nft_immediate doesn't zero the destination register, but this is needed when the length isn't a multiple of 4. Furthermore, the zeroing in nft_payload is broken. We can't use [len / 4] = 0 -- if len is a multiple of 4, index is off by one. Skip zeroing in this case and use a conditional instead of (len -1) / 4. 
Fixes: 49499c3e6e18 ("netfilter: nf_tables: switch registers to 32 bit addressing") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nft_payload.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 7ba9a624090f..91e395fd0a65 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -119,6 +119,8 @@ static inline u8 nft_reg_load8(u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index b2f88617611a..f73d47b3ffb7 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -74,7 +74,9 @@ static void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) From fe46ff0c50b7df57371d6b0f218f6a87c1f3ad1e Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Mon, 24 Aug 2020 13:44:42 +0800 Subject: [PATCH 18/72] net: hns: Fix memleak in hns_nic_dev_probe [ Upstream commit 100e3345c6e719d2291e1efd5de311cc24bb9c0b ] hns_nic_dev_probe allocates ndev, but not free it on two error handling paths, which may lead to memleak. Fixes: 63434888aaf1b ("net: hns: net: hns: enet adds support of acpi") Signed-off-by: Dinghao Liu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 24a815997ec5..796f81106b43 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1990,8 +1990,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2003,7 +2005,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = acpi_fwnode_handle(args.adev); } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); From 1f952fce59241d068ebcc434cc977f372e09a5d0 Mon Sep 17 00:00:00 2001 From: Yuusuke Ashizuka Date: Thu, 20 Aug 2020 18:43:07 +0900 Subject: [PATCH 19/72] ravb: Fixed to be able to unload modules [ Upstream commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc ] When this driver is built as a module, I cannot rmmod it after insmoding it. This is because that this driver calls ravb_mdio_init() at the time of probe, and module->refcnt is incremented by alloc_mdio_bitbang() called after that. Therefore, even if ifup is not performed, the driver is in use and rmmod cannot be performed. 
$ lsmod Module Size Used by ravb 40960 1 $ rmmod ravb rmmod: ERROR: Module ravb is in use Call ravb_mdio_init() at open and free_mdio_bitbang() at close, thereby rmmod is possible in the ifdown state. Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") Signed-off-by: Yuusuke Ashizuka Reviewed-by: Sergei Shtylyov Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/renesas/ravb_main.c | 110 +++++++++++------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 93d3152752ff..a5de56bcbac0 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1336,6 +1336,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, return error; } +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@ -1344,6 +1389,13 @@ static int ravb_open(struct net_device *ndev) struct device *dev = &pdev->dev; int error; + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); @@ -1421,6 +1473,7 @@ static int ravb_open(struct net_device *ndev) out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; } @@ -1718,6 +1771,8 @@ static int ravb_close(struct net_device *ndev) ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); + ravb_mdio_release(priv); + return 0; } @@ -1820,51 +1875,6 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_change_mtu = eth_change_mtu, }; -/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ 
-static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@ -2069,13 +2079,6 @@ static int ravb_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } - /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); @@ -2095,8 +2098,6 @@ static int ravb_probe(struct platform_device *pdev) out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); -out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); @@ -2129,7 +2130,6 @@ static int ravb_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); From 3af826c80d371b3ba4511cf5315acbac064c6f5c Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sun, 23 Aug 2020 16:56:47 +0800 Subject: [PATCH 20/72] net: arc_emac: Fix memleak in arc_mdio_probe [ Upstream commit e2d79cd8875fa8c3cc7defa98a8cc99a1ed0c62f ] When devm_gpiod_get_optional() fails, bus should be freed just like when of_mdiobus_register() fails. Fixes: 1bddd96cba03d ("net: arc_emac: support the phy reset for emac driver") Signed-off-by: Dinghao Liu Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/arc/emac_mdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index a22403c688c9..337cfce78aef 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -152,6 +152,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } From 8064eaf3a52469936adbb7da7ae32c90492fbdf4 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 25 Aug 2020 08:46:17 +0200 Subject: [PATCH 21/72] dmaengine: pl330: Fix burst length if burst size is smaller than bus width [ Upstream commit 0661cef675d37e2c4b66a996389ebeae8568e49e ] Move the burst len fixup after setting the generic value for it. This finally enables the fixup introduced by commit 137bd11090d8 ("dmaengine: pl330: Align DMA memcpy operations to MFIFO width"), which otherwise was overwritten by the generic value. 
Reported-by: kernel test robot Fixes: 137bd11090d8 ("dmaengine: pl330: Align DMA memcpy operations to MFIFO width") Signed-off-by: Marek Szyprowski Link: https://lore.kernel.org/r/20200825064617.16193-1-m.szyprowski@samsung.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/pl330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 57b375d0de29..16c08846ea0e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2677,6 +2677,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = get_burst_len(desc, len); /* * If burst size is smaller than bus width then make sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. @@ -2684,7 +2685,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; From 7f3aa14ce1c8a2a2d22da48ca42727d8875d6228 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 26 Aug 2020 01:08:33 -0400 Subject: [PATCH 22/72] bnxt_en: Check for zero dir entries in NVRAM. [ Upstream commit dbbfa96ad920c50d58bcaefa57f5f33ceef9d00e ] If firmware goes into unstable state, HWRM_NVM_GET_DIR_INFO firmware command may return zero dir entries. Return error in such case to avoid zero length dma buffer request. Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 427d4dbc9735..ac03bba10e4f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1457,6 +1457,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) if (rc != 0) return rc; + if (!dir_entries || !entry_length) + return -EIO; + /* Insert 2 bytes of directory info (count and size of entries) */ if (len < 2) return -EINVAL; From ba13c07c58562d2c5e04928eccca01a5577ca5ba Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 26 Aug 2020 01:08:35 -0400 Subject: [PATCH 23/72] bnxt_en: Fix PCI AER error recovery flow [ Upstream commit df3875ec550396974b1d8a518bd120d034738236 ] When a PCI error is detected the PCI state could be corrupt, save the PCI state after initialization and restore it after the slot reset. Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 421cbba9a3bc..f451be63ab7e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7085,6 +7085,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_parse_log_pcie_link(bp); + pci_save_state(pdev); return 0; init_err: @@ -7158,6 +7159,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); if (netif_running(netdev)) err = bnxt_open(netdev); From 8238ee93a30a5ff6fc75751e122a28e0d92f3e12 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Sep 2020 11:30:48 -0400 Subject: [PATCH 24/72] fix regression in "epoll: Keep a reference on files added to the check list" [ Upstream commit 77f4689de17c0887775bb77896f4cc11a39bf848 ] epoll_loop_check_proc() can run into a file already committed to destruction; we can't grab a reference on those and don't need to add them to the set for reverse path check anyway. Tested-by: Marc Zyngier Fixes: a9ed4a6560b8 ("epoll: Keep a reference on files added to the check list") Signed-off-by: Al Viro Signed-off-by: Sasha Levin --- fs/eventpoll.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index aad52e185836..8c40d6652a9a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1748,9 +1748,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * during ep_insert(). */ if (list_empty(&epi->ffd.file->f_tfile_llink)) { - get_file(epi->ffd.file); - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); } } } From dbf1ea0b3803b29675cc3dbea873d9ff25333e7d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 3 Sep 2020 14:28:54 -0400 Subject: [PATCH 25/72] tg3: Fix soft lockup when tg3_reset_task() fails. [ Upstream commit 556699341efa98243e08e34401b3f601da91f5a3 ] If tg3_reset_task() fails, the device state is left in an inconsistent state with IFF_RUNNING still set but NAPI state not enabled. A subsequent operation, such as ifdown or AER error can cause it to soft lock up when it tries to disable NAPI state. Fix it by bringing down the device to !IFF_RUNNING state when tg3_reset_task() fails. tg3_reset_task() running from workqueue will now call tg3_close() when the reset fails. We need to modify tg3_reset_task_cancel() slightly to avoid tg3_close() calling cancel_work_sync() to cancel tg3_reset_task(). Otherwise cancel_work_sync() will wait forever for tg3_reset_task() to finish. Reported-by: David Christensen Reported-by: Baptiste Covolato Fixes: db2199737990 ("tg3: Schedule at most one tg3_reset_task run") Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/tg3.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5790b35064a8..2db6102ed584 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7201,8 +7201,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -11174,18 +11174,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. + */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } From 094031c4a363bb07a77a595b731237a41028e85f Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 28 Aug 2020 08:06:15 +0800 Subject: [PATCH 26/72] iommu/vt-d: Serialize IOMMU GCMD register modifications [ Upstream commit 6e4e9ec65078093165463c13d4eb92b3e8d7b2e8 ] The VT-d spec requires (10.4.4 Global Command Register, GCMD_REG General Description) that: If multiple control fields in this register need to be modified, software must serialize the modifications through multiple writes to this register. However, in irq_remapping.c, modifications of IRE and CFI are done in one write. We need to do two separate writes with STS checking after each. It also checks the status register before writing command register to avoid unnecessary register write. Fixes: af8d102f999a4 ("x86/intel/irq_remapping: Clean up x2apic opt-out security warning mess") Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Cc: Andy Lutomirski Cc: Jacob Pan Cc: Kevin Tian Cc: Ashok Raj Link: https://lore.kernel.org/r/20200828000615.8281-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/intel_irq_remapping.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ac596928f6b4..ce125ec23d2a 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -486,12 +486,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. 
compatibility) interrupts From 2e4a2bac24b8fd3ad95c152d911831e120f79b96 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 6 Jul 2020 11:33:38 -0700 Subject: [PATCH 27/72] thermal: ti-soc-thermal: Fix bogus thermal shutdowns for omap4430 [ Upstream commit 30d24faba0532d6972df79a1bf060601994b5873 ] We can sometimes get bogus thermal shutdowns on omap4430 at least with droid4 running idle with a battery charger connected: thermal thermal_zone0: critical temperature reached (143 C), shutting down Dumping out the register values shows we can occasionally get a 0x7f value that is outside the TRM listed values in the ADC conversion table. And then we get a normal value when reading again after that. Reading the register multiple times does not seem help avoiding the bogus values as they stay until the next sample is ready. Looking at the TRM chapter "18.4.10.2.3 ADC Codes Versus Temperature", we should have values from 13 to 107 listed with a total of 95 values. But looking at the omap4430_adc_to_temp array, the values are off, and the end values are missing. And it seems that the 4430 ADC table is similar to omap3630 rather than omap4460. Let's fix the issue by using values based on the omap3630 table and just ignoring invalid values. Compared to the 4430 TRM, the omap3630 table has the missing values added while the TRM table only shows every second value. Note that sometimes the ADC register values within the valid table can also be way off for about 1 out of 10 values. But it seems that those just show about 25 C too low values rather than too high values. So those do not cause a bogus thermal shutdown. Fixes: 1a31270e54d7 ("staging: omap-thermal: add OMAP4 data structures") Cc: Merlijn Wajer Cc: Pavel Machek Cc: Sebastian Reichel Signed-off-by: Tony Lindgren Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200706183338.25622-1-tony@atomide.com Signed-off-by: Sasha Levin --- .../ti-soc-thermal/omap4-thermal-data.c | 23 ++++++++++--------- .../thermal/ti-soc-thermal/omap4xxx-bandgap.h | 10 +++++--- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index d255d33da9eb..02e71d461d5c 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -49,20 +49,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature". 
*/ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index 6f2de3a3356d..86850082b24b 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -67,9 +67,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". + */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 From ad68326969f850d6ddf8151ee76299078f73a3cf Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 16:36:19 -0700 Subject: [PATCH 28/72] include/linux/log2.h: add missing () around n in roundup_pow_of_two() [ Upstream commit 428fc0aff4e59399ec719ffcc1f7a5d29a4ee476 ] Otherwise gcc generates warnings if the expression is complicated. Fixes: 312a0c170945 ("[PATCH] LOG2: Alter roundup_pow_of_two() so that it can use a ilog2() on a constant") Signed-off-by: Jason Gunthorpe Signed-off-by: Andrew Morton Link: https://lkml.kernel.org/r/0-v1-8a2697e3c003+41165-log_brackets_jgg@nvidia.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- include/linux/log2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/log2.h b/include/linux/log2.h index c373295f359f..cca606609e1b 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -159,7 +159,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 
1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ From 4e989d243e65b2cd79442f4413c91a7d62bf075d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:26 -0400 Subject: [PATCH 29/72] btrfs: drop path before adding new uuid tree entry commit 9771a5cf937129307d9f58922d60484d58ababe7 upstream. With the conversion of the tree locks to rwsem I got the following lockdep splat: ====================================================== WARNING: possible circular locking dependency detected 5.8.0-rc7-00167-g0d7ba0c5b375-dirty #925 Not tainted ------------------------------------------------------ btrfs-uuid/7955 is trying to acquire lock: ffff88bfbafec0f8 (btrfs-root-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 but task is already holding lock: ffff88bfbafef2a8 (btrfs-uuid-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (btrfs-uuid-00){++++}-{3:3}: down_read_nested+0x3e/0x140 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_uuid_tree_add+0x89/0x2d0 btrfs_uuid_scan_kthread+0x330/0x390 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 -> #0 (btrfs-root-00){++++}-{3:3}: __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 down_read_nested+0x3e/0x140 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_find_root+0x45/0x1b0 btrfs_read_tree_root+0x61/0x100 btrfs_get_root_ref.part.50+0x143/0x630 btrfs_uuid_tree_iterate+0x207/0x314 btrfs_uuid_rescan_kthread+0x12/0x50 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(btrfs-uuid-00); lock(btrfs-root-00); lock(btrfs-uuid-00); lock(btrfs-root-00); *** DEADLOCK *** 1 lock held by btrfs-uuid/7955: #0: ffff88bfbafef2a8 (btrfs-uuid-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 stack backtrace: CPU: 73 PID: 7955 Comm: btrfs-uuid Kdump: loaded Not tainted 5.8.0-rc7-00167-g0d7ba0c5b375-dirty #925 Hardware name: Quanta Tioga Pass Single Side 01-0030993006/Tioga Pass Single Side, BIOS F08_3A18 12/20/2018 Call Trace: dump_stack+0x78/0xa0 check_noncircular+0x165/0x180 __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 ? __btrfs_tree_read_lock+0x39/0x180 ? btrfs_root_node+0x1c/0x1d0 down_read_nested+0x3e/0x140 ? __btrfs_tree_read_lock+0x39/0x180 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_find_root+0x45/0x1b0 btrfs_read_tree_root+0x61/0x100 btrfs_get_root_ref.part.50+0x143/0x630 btrfs_uuid_tree_iterate+0x207/0x314 ? btree_readpage+0x20/0x20 btrfs_uuid_rescan_kthread+0x12/0x50 kthread+0x133/0x150 ? kthread_create_on_node+0x60/0x60 ret_from_fork+0x1f/0x30 This problem exists because we have two different rescan threads, btrfs_uuid_scan_kthread which creates the uuid tree, and btrfs_uuid_tree_iterate that goes through and updates or deletes any out of date roots. The problem is they both do things in different order. btrfs_uuid_scan_kthread() reads the tree_root, and then inserts entries into the uuid_root. btrfs_uuid_tree_iterate() scans the uuid_root, but then does a btrfs_get_fs_root() which can read from the tree_root. It's actually easy enough to not be holding the path in btrfs_uuid_scan_kthread() when we add a uuid entry, as we already drop it further down and re-start the search when we loop. 
So simply move the path release before we add our entry to the uuid tree. This also fixes a problem where we're holding a path open after we do btrfs_end_transaction(), which has its own problems.
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bace03a546b2..c31b02692f70 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4181,6 +4181,7 @@ static int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, root_item.uuid, @@ -4206,6 +4207,7 @@ static int btrfs_uuid_scan_kthread(void *data) } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans, fs_info->uuid_root); trans = NULL; @@ -4213,7 +4215,6 @@ static int btrfs_uuid_scan_kthread(void *data) break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
From a2c79a9f156a6db0c1e18d0c165d419c8e4dc2b2 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 15 Aug 2018 18:26:53 +0300 Subject: [PATCH 30/72] btrfs: Remove redundant extent_buffer_get in get_old_root [ Upstream commit 6c122e2a0c515cfb3f3a9cefb5dad4cb62109c78 ]
get_old_root is used only by btrfs_search_old_slot to initialise the path structure. The old root is always a cloned buffer (either via alloc dummy or via btrfs_clone_extent_buffer) and its reference count is 2: 1 from allocation, 1 from extent_buffer_get call in get_old_root. This latter explicit ref count acquire operation is in fact unnecessary since the semantic is such that the newly allocated buffer is handed over to the btrfs_path for lifetime management. Considering this just remove the extra extent_buffer_get in get_old_root.
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index b5ebb43b1824..78d4c8c22b4a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1430,7 +1430,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - extent_buffer_get(eb); btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start);
From 87ce624f5cb032e17c8c95a89d6e291637fe7382 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 15 Aug 2018 18:26:54 +0300 Subject: [PATCH 31/72] btrfs: Remove extraneous extent_buffer_get from tree_mod_log_rewind [ Upstream commit 24cee18a1c1d7c731ea5987e0c99daea22ae7f4a ]
When a rewound buffer is created it already has a ref count of 1 and the dummy flag set. Then another ref is taken bumping the count to 2. Finally when this buffer is released from btrfs_release_path the extra reference is decremented by the special handling code in free_extent_buffer. However, this special code is in fact redundant since a ref count of 1 is still correct since the buffer is only accessed via btrfs_path struct. This paves the way for removing the special handling in free_extent_buffer.
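For illustration only (a rough sketch, not part of the change itself), the reference-count flow before this patch was approximately:

	allocate rewound buffer (dummy/clone)  -> eb_rewin, ref count 1
	extent_buffer_get(eb_rewin)            -> ref count 2 (the call removed here)
	btrfs_release_path()                   -> relies on the special case in
	                                          free_extent_buffer() to drop the
	                                          extra reference

With the extra get gone, the single reference taken at allocation is handed over to the btrfs_path and dropped normally when the path is released.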
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 78d4c8c22b4a..406ae49baa07 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1360,7 +1360,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); - extent_buffer_get(eb_rewin); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) >
From cc037d65658da025b45b229e996777baec72049c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:31 -0400 Subject: [PATCH 32/72] btrfs: set the lockdep class for log tree extent buffers [ Upstream commit d3beaa253fd6fa40b8b18a216398e6e5376a9d21 ]
These are special extent buffers that get rewound in order to lookup the state of the tree at a specific point in time. As such they do not go through the normal initialization paths that set their lockdep class, so handle them appropriately when they are created and before they are locked.
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 406ae49baa07..65689cbc362d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1360,6 +1360,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1429,7 +1431,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1437,6 +1438,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); else
From ab6d8b281d8d8a8103cf0a828c492a25c5f264dc Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 15 May 2019 14:38:18 +0900 Subject: [PATCH 33/72] uaccess: Add non-pagefault user-space read functions [ Upstream commit 3d7081822f7f9eab867d9bcc8fd635208ec438e0 ]
Add probe_user_read(), strncpy_from_unsafe_user() and strnlen_unsafe_user() which allow the caller to access user-space in IRQ context. The current probe_kernel_read() and strncpy_from_unsafe() are not available for user-space memory, because they set KERNEL_DS while accessing data. On some architectures, user address space and kernel address space can co-exist, but on others they cannot. In that case, setting KERNEL_DS means the given address is treated as being in kernel address space. Also, strnlen_user() is only available from user context since it can sleep if pagefaults are enabled. To access user-space memory without pagefaults, we need these new functions, which set USER_DS while accessing the data.
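As a rough usage sketch (illustrative only, not part of this patch; 'user_ptr' and the calling context are made up for the example), a handler running with pagefaults disabled could now do:

	char buf[64];
	long ret;

	/* copy up to sizeof(buf) bytes from a user address without faulting */
	ret = probe_user_read(buf, (const void __user *)user_ptr, sizeof(buf));
	if (ret)	/* -EFAULT: page not resident or address invalid */
		return ret;

whereas probe_kernel_read() on the same address would be wrong on architectures where user and kernel address spaces are separate.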
Link: http://lkml.kernel.org/r/155789869802.26965.4940338412595759063.stgit@devnote2 Acked-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- include/linux/uaccess.h | 14 +++++ mm/maccess.c | 122 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 130 insertions(+), 6 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 9442423979c1..6d27d58ca4e0 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -90,6 +90,17 @@ static inline unsigned long __copy_from_user_nocache(void *to, extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); +/* + * probe_user_read(): safely attempt to read from a location in user space + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long probe_user_read(void *dst, const void __user *src, size_t size); + /* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -103,6 +114,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count); +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location diff --git a/mm/maccess.c b/mm/maccess.c index 78f9274dd49d..5ebf9a3ca367 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -5,8 +5,20 @@ #include #include +static __always_inline long +probe_read_common(void *dst, const void __user *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_from_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** - * probe_kernel_read(): safely attempt to read from a location + * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk @@ -29,16 +41,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_from_user_inatomic(dst, - (__force const void __user *)src, size); - pagefault_enable(); + ret = probe_read_common(dst, (__force const void __user *)src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_read); +/** + * probe_user_read(): safely attempt to read from a user-space location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from. This must be a user address. + * @size: size of the data chunk + * + * Safely read from user address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ + +long __weak probe_user_read(void *dst, const void __user *src, size_t size) + __attribute__((alias("__probe_user_read"))); + +long __probe_user_read(void *dst, const void __user *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_READ, src, size)) + ret = probe_read_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_read); + /** * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -66,6 +102,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) } EXPORT_SYMBOL_GPL(probe_kernel_write); + /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. * @dst: Destination address, in kernel space. This buffer must be at @@ -105,3 +142,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) return ret ? -EFAULT : src - unsafe_addr; } + +/** + * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user + * address. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @unsafe_addr: Unsafe user address. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from unsafe user address to kernel buffer. + * + * On success, returns the length of the string INCLUDING the trailing NUL. + * + * If access fails, returns -EFAULT (some data may have been copied + * and the trailing NUL added). + * + * If @count is smaller than the length of the string, copies @count-1 bytes, + * sets the last byte of @dst buffer to NUL and returns @count. + */ +long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count) +{ + mm_segment_t old_fs = get_fs(); + long ret; + + if (unlikely(count <= 0)) + return 0; + + set_fs(USER_DS); + pagefault_disable(); + ret = strncpy_from_user(dst, unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + if (ret >= count) { + ret = count; + dst[ret - 1] = '\0'; + } else if (ret > 0) { + ret++; + } + + return ret; +} + +/** + * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL. + * @unsafe_addr: The string to measure. + * @count: Maximum count (including NUL) + * + * Get the size of a NUL-terminated string in user space without pagefault. + * + * Returns the size of the string INCLUDING the terminating NUL. + * + * If the string is too long, returns a number larger than @count. User + * has to check the return value against "> count". + * On exception (or invalid count), returns 0. + * + * Unlike strnlen_user, this can be used from IRQ handler etc. because + * it disables pagefaults. 
+ */ +long strnlen_unsafe_user(const void __user *unsafe_addr, long count) +{ + mm_segment_t old_fs = get_fs(); + int ret; + + set_fs(USER_DS); + pagefault_disable(); + ret = strnlen_user(unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + return ret; +} From 6faf75bacc39732aac1785340f9bf959a2425699 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:56 +0100 Subject: [PATCH 34/72] uaccess: Add non-pagefault user-space write function [ Upstream commit 1d1585ca0f48fe7ed95c3571f3e4a82b2b5045dc ] Commit 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions") missed to add probe write function, therefore factor out a probe_write_common() helper with most logic of probe_kernel_write() except setting KERNEL_DS, and add a new probe_user_write() helper so it can be used from BPF side. Again, on some archs, the user address space and kernel address space can co-exist and be overlapping, so in such case, setting KERNEL_DS would mean that the given address is treated as being in kernel address space. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Cc: Masami Hiramatsu Link: https://lore.kernel.org/bpf/9df2542e68141bfa3addde631441ee45503856a8.1572649915.git.daniel@iogearbox.net Signed-off-by: Sasha Levin --- include/linux/uaccess.h | 12 +++++++++++ mm/maccess.c | 45 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 6d27d58ca4e0..cc5ba47062e8 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -113,6 +113,18 @@ extern long probe_user_read(void *dst, const void __user *src, size_t size); extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +/* + * probe_user_write(): safely attempt to write to a location in user space + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); diff --git a/mm/maccess.c b/mm/maccess.c index 5ebf9a3ca367..03ea550f5a74 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -17,6 +17,18 @@ probe_read_common(void *dst, const void __user *src, size_t size) return ret ? -EFAULT : 0; } +static __always_inline long +probe_write_common(void __user *dst, const void *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_to_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data @@ -84,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. 
*/ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_write"))); @@ -93,15 +106,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); + ret = probe_write_common((__force void __user *)dst, src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_write); +/** + * probe_user_write(): safely attempt to write to a user-space location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_write(void __user *dst, const void *src, size_t size) + __attribute__((alias("__probe_user_write"))); + +long __probe_user_write(void __user *dst, const void *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_WRITE, dst, size)) + ret = probe_write_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_write); /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. From 52245f066ca7027acc270050e6b3916059ee21c7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:27 -0400 Subject: [PATCH 35/72] btrfs: fix potential deadlock in the search ioctl [ Upstream commit a48b73eca4ceb9b8a4b97f290a065335dbcd8a04 ] With the conversion of the tree locks to rwsem I got the following lockdep splat: ====================================================== WARNING: possible circular locking dependency detected 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922 Not tainted ------------------------------------------------------ compsize/11122 is trying to acquire lock: ffff889fabca8768 (&mm->mmap_lock#2){++++}-{3:3}, at: __might_fault+0x3e/0x90 but task is already holding lock: ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (btrfs-fs-00){++++}-{3:3}: down_write_nested+0x3b/0x70 __btrfs_tree_lock+0x24/0x120 btrfs_search_slot+0x756/0x990 btrfs_lookup_inode+0x3a/0xb4 __btrfs_update_delayed_inode+0x93/0x270 btrfs_async_run_delayed_root+0x168/0x230 btrfs_work_helper+0xd4/0x570 process_one_work+0x2ad/0x5f0 worker_thread+0x3a/0x3d0 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 -> #1 (&delayed_node->mutex){+.+.}-{3:3}: __mutex_lock+0x9f/0x930 btrfs_delayed_update_inode+0x50/0x440 btrfs_update_inode+0x8a/0xf0 btrfs_dirty_inode+0x5b/0xd0 touch_atime+0xa1/0xd0 btrfs_file_mmap+0x3f/0x60 mmap_region+0x3a4/0x640 do_mmap+0x376/0x580 vm_mmap_pgoff+0xd5/0x120 ksys_mmap_pgoff+0x193/0x230 do_syscall_64+0x50/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #0 (&mm->mmap_lock#2){++++}-{3:3}: __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 __might_fault+0x68/0x90 _copy_to_user+0x1e/0x80 copy_to_sk.isra.32+0x121/0x300 search_ioctl+0x106/0x200 btrfs_ioctl_tree_search_v2+0x7b/0xf0 btrfs_ioctl+0x106f/0x30a0 ksys_ioctl+0x83/0xc0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x50/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xa9 other info that might help us debug this: Chain exists of: &mm->mmap_lock#2 --> &delayed_node->mutex --> btrfs-fs-00 Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(btrfs-fs-00); lock(&delayed_node->mutex); lock(btrfs-fs-00); lock(&mm->mmap_lock#2); *** DEADLOCK *** 1 lock held by compsize/11122: #0: ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 stack backtrace: CPU: 17 PID: 11122 Comm: compsize Kdump: loaded Not tainted 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922 Hardware name: Quanta Tioga Pass Single Side 01-0030993006/Tioga Pass Single Side, BIOS F08_3A18 12/20/2018 Call Trace: dump_stack+0x78/0xa0 check_noncircular+0x165/0x180 __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 ? __might_fault+0x3e/0x90 ? find_held_lock+0x72/0x90 __might_fault+0x68/0x90 ? __might_fault+0x3e/0x90 _copy_to_user+0x1e/0x80 copy_to_sk.isra.32+0x121/0x300 ? btrfs_search_forward+0x2a6/0x360 search_ioctl+0x106/0x200 btrfs_ioctl_tree_search_v2+0x7b/0xf0 btrfs_ioctl+0x106f/0x30a0 ? __do_sys_newfstat+0x5a/0x70 ? ksys_ioctl+0x83/0xc0 ksys_ioctl+0x83/0xc0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x50/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xa9 The problem is we're doing a copy_to_user() while holding tree locks, which can deadlock if we have to do a page fault for the copy_to_user(). This exists even without my locking changes, so it needs to be fixed. Rework the search ioctl to do the pre-fault and then copy_to_user_nofault for the copying. 
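In outline (a simplified sketch of the change below, error handling and other details omitted), the loop in search_ioctl() becomes:

	while (1) {
		/* fault the user buffer in while no tree locks are held */
		ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
		if (ret)
			break;

		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
		...
		/*
		 * copy_to_sk() now copies with a nofault primitive; if that
		 * copy faults it returns 0, so this outer loop faults the
		 * pages back in and retries instead of returning -EFAULT.
		 */
	}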
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/extent_io.h | 6 +++--- fs/btrfs/ioctl.c | 27 ++++++++++++++++++++------- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fa22bb29eee6..d6c827a9ebc5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5488,9 +5488,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5511,7 +5511,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (probe_user_write(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 9ecdc9584df7..75c03aa1800f 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -401,9 +401,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer(struct extent_buffer *eb, const void *src, unsigned long start, unsigned long len); void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index eefe103c65da..6db46daeed16 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2041,9 +2041,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through + */ + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2051,10 +2056,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * * sk_offset so we copy the full thing again. 
+ */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2142,6 +2151,10 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) From 6a14d943fcc6dcabf52690732471fe84ff0ee181 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Wed, 9 Oct 2019 11:07:18 +0200 Subject: [PATCH 36/72] net: usb: qmi_wwan: add Telit 0x1050 composition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e0ae2c578d3909e60e9448207f5d83f785f1129f ] This patch adds support for Telit FN980 0x1050 composition 0x1050: tty, adb, rmnet, tty, tty, tty, tty Signed-off-by: Daniele Palmas Acked-by: Bjørn Mork Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 254a27295f41..97a83d351a10 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -923,6 +923,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ From e2da4616ac0372a646389fd847c636a27e49213e Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Mon, 10 Apr 2017 17:34:23 +0200 Subject: [PATCH 37/72] drivers: net: usb: qmi_wwan: add QMI_QUIRK_SET_DTR for Telit PID 0x1201 [ Upstream commit 14cf4a771b3098e431d2677e3533bdd962e478d8 ] Telit LE920A4 uses the same pid 0x1201 of LE920, but modem implementation is different, since it requires DTR to be set for answering to qmi messages. This patch replaces QMI_FIXED_INTF with QMI_QUIRK_SET_DTR: tests on LE920 have been performed in order to verify backward compatibility. Signed-off-by: Daniele Palmas Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 97a83d351a10..6104500314d1 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -927,7 +927,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ - {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ From 53ce132f5555479424da90b1e62666392c64ebac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Tue, 13 Jun 2017 19:10:18 +0200 Subject: [PATCH 38/72] qmi_wwan: new Telewell and Sierra device IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 60cfe1eaccb8af598ebe1bdc44e157ea30fcdd81 ] A new Sierra Wireless EM7305 device ID used in a Toshiba laptop, and two Longcheer device IDs entries used by Telewell TW-3G HSPA+ branded modems. Reported-by: Petr Kloc Reported-by: Teemu Likonen Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6104500314d1..d81233560021 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -910,6 +910,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ @@ -928,6 +930,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ From eba015400a0fc485e78f8b3430b35af290b2c0d9 Mon Sep 17 00:00:00 2001 From: Rogan Dawes Date: Wed, 17 Jul 2019 11:14:33 +0200 Subject: [PATCH 39/72] usb: qmi_wwan: add D-Link DWM-222 A2 device ID [ Upstream commit 7d6053097311643545a8118100175a39bd6fa637 ] Signed-off-by: Rogan Dawes Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d81233560021..74c925cd19a9 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -890,6 +890,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
From ff797b3bc8a5dbd1bf8279482f9aee87379e2de3 Mon Sep 17 00:00:00 2001 From: Tong Zhang Date: Mon, 24 Aug 2020 18:45:41 -0400 Subject: [PATCH 40/72] ALSA: ca0106: fix error code handling commit ee0761d1d8222bcc5c86bf10849dc86cf008557c upstream.
snd_ca0106_spi_write() returns 1 on error, snd_ca0106_pcm_power_dac() is returning the error code directly, and the caller is expecting a negative error code.
Signed-off-by: Tong Zhang Cc: Link: https://lore.kernel.org/r/20200824224541.1260307-1-ztong0001@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/ca0106/ca0106_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 6165a57a94ae..2c30a0672c17 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; }
From a664f1a5d39ec4954e4a2e6fae4552ef3b35a166 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 1 Sep 2020 15:18:02 +0200 Subject: [PATCH 41/72] ALSA: pcm: oss: Remove superfluous WARN_ON() for mulaw sanity check commit 949a1ebe8cea7b342085cb6a4946b498306b9493 upstream.
The PCM OSS mulaw plugin has a check of the format of the counterpart, whether it's a linear format. The check is with snd_BUG_ON() that emits WARN_ON() when the debug config is set, and it confuses syzkaller as if it were a serious issue. Let's drop snd_BUG_ON() to avoid that. While we're at it, correct the error code to a more suitable one, EINVAL.
Reported-by: syzbot+23b22dc2e0b81cbfcc95@syzkaller.appspotmail.com Cc: Link: https://lore.kernel.org/r/20200901131802.18157-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/mulaw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a7..fe27034f2846 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, From f66489209fcf8aea7038d0048e7a16a7e1067b05 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 23 Aug 2020 16:55:45 +0900 Subject: [PATCH 42/72] ALSA: firewire-digi00x: exclude Avid Adrenaline from detection commit acd46a6b6de88569654567810acad2b0a0a25cea upstream. Avid Adrenaline is reported that ALSA firewire-digi00x driver is bound to. However, as long as he investigated, the design of this model is hardly similar to the one of Digi 00x family. It's better to exclude the model from modalias of ALSA firewire-digi00x driver. This commit changes device entries so that the model is excluded. $ python3 crpp < ~/git/am-config-rom/misc/avid-adrenaline.img ROM header and bus information block ----------------------------------------------------------------- 400 04203a9c bus_info_length 4, crc_length 32, crc 15004 404 31333934 bus_name "1394" 408 e064a002 irmc 1, cmc 1, isc 1, bmc 0, cyc_clk_acc 100, max_rec 10 (2048) 40c 00a07e01 company_id 00a07e | 410 00085257 device_id 0100085257 | EUI-64 00a07e0100085257 root directory ----------------------------------------------------------------- 414 0005d08c directory_length 5, crc 53388 418 0300a07e vendor 41c 8100000c --> descriptor leaf at 44c 420 0c008380 node capabilities 424 8d000002 --> eui-64 leaf at 42c 428 d1000004 --> unit directory at 438 eui-64 leaf at 42c ----------------------------------------------------------------- 42c 0002410f leaf_length 2, crc 16655 430 00a07e01 company_id 00a07e | 434 00085257 device_id 0100085257 | EUI-64 00a07e0100085257 unit directory at 438 ----------------------------------------------------------------- 438 0004d6c9 directory_length 4, crc 54985 43c 1200a02d specifier id: 1394 TA 440 13014001 version: Vender Unique and AV/C 444 17000001 model 448 81000009 --> descriptor leaf at 46c descriptor leaf at 44c ----------------------------------------------------------------- 44c 00077205 leaf_length 7, crc 29189 450 00000000 textual descriptor 454 00000000 minimal ASCII 458 41766964 "Avid" 45c 20546563 " Tec" 460 686e6f6c "hnol" 464 6f677900 "ogy" 468 00000000 descriptor leaf at 46c ----------------------------------------------------------------- 46c 000599a5 leaf_length 5, crc 39333 470 00000000 textual descriptor 474 00000000 minimal ASCII 478 41647265 "Adre" 47c 6e616c69 "nali" 480 6e650000 "ne" Reported-by: Simon Wood Fixes: 9edf723fd858 ("ALSA: firewire-digi00x: add skeleton for Digi 002/003 family") Cc: # 4.4+ Signed-off-by: Takashi Sakamoto Link: https://lore.kernel.org/r/20200823075545.56305-1-o-takashi@sakamocchi.jp Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/firewire/digi00x/digi00x.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/firewire/digi00x/digi00x.c 
b/sound/firewire/digi00x/digi00x.c index ef689997d6a5..bf53e342788e 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. */ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {}
From 0c7cee63ec92b316f8b891b667177a080b670566 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 17 Aug 2020 18:00:55 +0800 Subject: [PATCH 43/72] block: allow for_each_bvec to support zero len bvec commit 7e24969022cbd61ddc586f14824fc205661bb124 upstream.
Block layer usually doesn't support or allow zero-length bvec. Since commit 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()"), iterate_bvec() switches to bvec iterator. However, Al mentioned that 'Zero-length segments are not disallowed' in iov_iter. Fixes for_each_bvec() so that it can move on after seeing one zero length bvec.
Fixes: 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()") Reported-by: syzbot Signed-off-by: Ming Lei Tested-by: Tetsuo Handa Cc: Al Viro Cc: Matthew Wilcox Cc: Link: https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2262077.html Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- include/linux/bvec.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 89b65b82d98f..8047c3ad77a6 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -88,10 +88,17 @@ static inline void bvec_iter_advance(const struct bio_vec *bv, } } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) #endif /* __LINUX_BVEC_ITER_H */
From d24c407b0f7af675a3928fdd4121306ad32c60ab Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 14 Mar 2018 15:48:06 -0700 Subject: [PATCH 44/72] block: Move SECTOR_SIZE and SECTOR_SHIFT definitions into <linux/blkdev.h> commit 233bde21aa43516baa013ef7ac33f3427056db3e upstream.
It happens often while I'm preparing a patch for a block driver that I'm wondering: is a definition of SECTOR_SIZE and/or SECTOR_SHIFT available for this driver? Do I have to introduce definitions of these constants before I can use these constants? To avoid this confusion, move the existing definitions of SECTOR_SIZE and SECTOR_SHIFT into the <linux/blkdev.h> header file such that these become available for all block drivers. Make the SECTOR_SIZE definition in the uapi msdos_fs.h header file conditional to avoid that including that header file after <linux/blkdev.h> causes the compiler to complain about a SECTOR_SIZE redefinition.
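As a usage illustration (not taken from this patch; 'nbytes' and 'nr_sects' are hypothetical locals), a block driver can then convert between bytes and 512-byte sectors without open-coding the number 9:

	sector_t nr_sects = nbytes >> SECTOR_SHIFT;		/* bytes -> sectors */
	u64 total_bytes = (u64)nr_sects << SECTOR_SHIFT;	/* sectors -> bytes */

This is essentially the substitution the diff below performs in blkdev.h and the converted drivers.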
Note: the SECTOR_SIZE / SECTOR_SHIFT / SECTOR_BITS definitions have not been removed from uapi header files nor from NAND drivers in which these constants are used for another purpose than converting block layer offsets and sizes into a number of sectors. Cc: David S. Miller Cc: Mike Snitzer Cc: Dan Williams Cc: Minchan Kim Cc: Nitin Gupta Reviewed-by: Sergey Senozhatsky Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- arch/xtensa/platforms/iss/simdisk.c | 1 - drivers/block/brd.c | 1 - drivers/block/rbd.c | 9 ------- drivers/block/zram/zram_drv.h | 1 - drivers/ide/ide-cd.c | 8 +++--- drivers/ide/ide-cd.h | 6 +---- drivers/nvdimm/nd.h | 1 - drivers/scsi/gdth.h | 3 --- include/linux/blkdev.h | 42 +++++++++++++++++++++-------- include/linux/device-mapper.h | 2 -- include/linux/ide.h | 1 - include/uapi/linux/msdos_fs.h | 2 ++ 12 files changed, 38 insertions(+), 39 deletions(-) diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index ede04cca30dd..82fb5102d824 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -21,7 +21,6 @@ #include #define SIMDISK_MAJOR 240 -#define SECTOR_SHIFT 9 #define SIMDISK_MINORS 1 #define MAX_SIMDISK_COUNT 10 diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 7e35574a17df..9d81ac8b4512 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -25,7 +25,6 @@ #include -#define SECTOR_SHIFT 9 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8a93ca4d6840..19f336752ad7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -50,15 +50,6 @@ #define RBD_DEBUG /* Activate rbd_assert() calls */ -/* - * The basic unit of block I/O is a sector. It is interpreted in a - * number of contexts in Linux (blk, bio, genhd), but the default is - * universally 512 bytes. These symbols are just slightly more - * meaningful than the bare numbers they represent. - */ -#define SECTOR_SHIFT 9 -#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) - /* * Increment the given counter and return its updated value. * If the counter is already 0 it will not be incremented. diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 74fcf10da374..6d2475a39e84 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -37,7 +37,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /*-- End of configurable params */ -#define SECTOR_SHIFT 9 #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) #define ZRAM_LOGICAL_BLOCK_SHIFT 12 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 883fe2cdd42c..6e3b3a5a3c36 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -704,7 +704,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = - queue_logical_block_size(q) >> SECTOR_BITS; + queue_logical_block_size(q) >> SECTOR_SHIFT; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", @@ -900,7 +900,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, * end up being bogus. 
*/ blocklen = be32_to_cpu(capbuf.blocklen); - blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; + blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT; switch (blocklen) { case 512: case 1024: @@ -916,7 +916,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, } *capacity = 1 + be32_to_cpu(capbuf.lba); - *sectors_per_frame = blocklen >> SECTOR_BITS; + *sectors_per_frame = blocklen >> SECTOR_SHIFT; ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", *capacity, *sectors_per_frame); @@ -993,7 +993,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) drive->probed_capacity = toc->capacity * sectors_per_frame; blk_queue_logical_block_size(drive->queue, - sectors_per_frame << SECTOR_BITS); + sectors_per_frame << SECTOR_SHIFT); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index 1efc936f5b66..7c6d017e84e9 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h @@ -20,11 +20,7 @@ /************************************************************************/ -#define SECTOR_BITS 9 -#ifndef SECTOR_SIZE -#define SECTOR_SIZE (1 << SECTOR_BITS) -#endif -#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS) +#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT) #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32) /* Capabilities Page size including 8 bytes of Mode Page Header */ diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index bd29e598bac1..2a820c1fdfcd 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -29,7 +29,6 @@ enum { * BTT instance */ ND_MAX_LANES = 256, - SECTOR_SHIFT = 9, INT_LBASIZE_ALIGNMENT = 64, }; diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 3fd8b83ffbf9..8039c809cef2 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h @@ -177,9 +177,6 @@ #define MSG_SIZE 34 /* size of message structure */ #define MSG_REQUEST 0 /* async. event: message */ -/* cacheservice defines */ -#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */ - /* DPMEM constants */ #define DPMEM_MAGIC 0xC0FFEE11 #define IC_HEADER_BYTES 48 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 060881478e59..848aab6c6982 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -850,6 +850,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) return bdev->bd_disk->queue; /* this is never NULL */ } +/* + * The basic unit of block I/O is a sector. It is used in a number of contexts + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 + * bytes. Variables of type sector_t represent an offset or size that is a + * multiple of 512 bytes. Hence these two constants. 
+ */ +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#endif + /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -877,19 +890,20 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); static inline unsigned int blk_rq_sectors(const struct request *rq) { - return blk_rq_bytes(rq) >> 9; + return blk_rq_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return blk_rq_cur_bytes(rq) >> 9; + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) - return min(q->limits.max_discard_sectors, UINT_MAX >> 9); + return min(q->limits.max_discard_sectors, + UINT_MAX >> SECTOR_SHIFT); if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; @@ -1162,16 +1176,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { - return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, flags); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, - block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, true); } @@ -1278,7 +1297,8 @@ static inline int queue_alignment_offset(struct request_queue *q) static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = sector_div(sector, granularity >> 9) << 9; + unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) + << SECTOR_SHIFT; return (granularity + lim->alignment_offset - alignment) % granularity; } @@ -1312,8 +1332,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return 0; /* Why are these in bytes, not sectors? */ - alignment = lim->discard_alignment >> 9; - granularity = lim->discard_granularity >> 9; + alignment = lim->discard_alignment >> SECTOR_SHIFT; + granularity = lim->discard_granularity >> SECTOR_SHIFT; if (!granularity) return 0; @@ -1324,7 +1344,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector offset = (granularity + alignment - offset) % granularity; /* Turn it back into bytes, gaah */ - return offset << 9; + return offset << SECTOR_SHIFT; } static inline int bdev_discard_alignment(struct block_device *bdev) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 9661bb2fbe22..165ddd482f0d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -576,8 +576,6 @@ extern struct ratelimit_state dm_ratelimit_state; #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) -#define SECTOR_SHIFT 9 - /* * Definitions of return values from target end_io function. 
*/ diff --git a/include/linux/ide.h b/include/linux/ide.h index a633898f36ac..eb2ac48c99db 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -128,7 +128,6 @@ struct ide_io_ports { */ #define PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ -#define SECTOR_SIZE 512 /* * Timeouts for various operations: diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index e956704f5fb1..95b8a9395ec1 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h @@ -9,7 +9,9 @@ * The MS-DOS filesystem constants/structures */ +#ifndef SECTOR_SIZE #define SECTOR_SIZE 512 /* sector size (bytes) */ +#endif #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */ #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */ From 8c36cde29ad1e563def83c41094a96027b7a9b1b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 2 Sep 2020 12:32:45 -0400 Subject: [PATCH 45/72] libata: implement ATA_HORKAGE_MAX_TRIM_128M and apply to Sandisks commit 3b5455636fe26ea21b4189d135a424a6da016418 upstream. All three generations of Sandisk SSDs lock up hard intermittently. Experiments showed that disabling NCQ lowered the failure rate significantly and the kernel has been disabling NCQ for some models of SD7's and 8's, which is obviously undesirable. Karthik worked with Sandisk to root cause the hard lockups to trim commands larger than 128M. This patch implements ATA_HORKAGE_MAX_TRIM_128M which limits max trim size to 128M and applies it to all three generations of Sandisk SSDs. Signed-off-by: Tejun Heo Cc: Karthik Shivaram Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-core.c | 5 ++--- drivers/ata/libata-scsi.c | 8 +++++++- include/linux/libata.h | 1 + 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 46bf7e9d00ab..2aa10cd4c5b7 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4371,9 +4371,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index f4b38adb9d8a..76ba83e245c2 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2314,6 +2314,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2339,7 +2340,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. 
*/ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } diff --git a/include/linux/libata.h b/include/linux/libata.h index 780ccde2c312..e2dac33eae96 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -435,6 +435,7 @@ enum { ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ From 1c1630436a53ebf2cec68c120f2200422d89f0b0 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 1 Sep 2020 14:25:42 +0800 Subject: [PATCH 46/72] dm cache metadata: Avoid returning cmd->bm wild pointer on error commit d16ff19e69ab57e08bf908faaacbceaf660249de upstream. Maybe __create_persistent_data_objects() caller will use PTR_ERR as a pointer, it will lead to some strange things. Signed-off-by: Ye Bin Cc: stable@vger.kernel.org Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-cache-metadata.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 62eb4b7caff3..a9208ab12708 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -508,12 +508,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } From 41718a868f611b386bc53d167518375563f456b1 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 1 Sep 2020 14:25:43 +0800 Subject: [PATCH 47/72] dm thin metadata: Avoid returning cmd->bm wild pointer on error commit 219403d7e56f9b716ad80ab87db85d29547ee73e upstream. Maybe __create_persistent_data_objects() caller will use PTR_ERR as a pointer, it will lead to some strange things. Signed-off-by: Ye Bin Cc: stable@vger.kernel.org Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-thin-metadata.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index d20f4023f6c1..b5bf2ecfaf91 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -700,12 +700,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } From fb9b5bc196be35bda0382a79c569b591777bf64b Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Fri, 4 Sep 2020 16:35:30 -0700 Subject: [PATCH 48/72] mm: slub: fix conversion of freelist_corrupted() commit dc07a728d49cf025f5da2c31add438d839d076c0 upstream. 
Commit 52f23478081ae0 ("mm/slub.c: fix corrupted freechain in deactivate_slab()") suffered an update when picked up from LKML [1]. Specifically, relocating 'freelist = NULL' into 'freelist_corrupted()' created a no-op statement. Fix it by sticking to the behavior intended in the original patch [1]. In addition, make freelist_corrupted() immune to passing NULL instead of &freelist. The issue has been spotted via static analysis and code review. [1] https://lore.kernel.org/linux-mm/20200331031450.12182-1-dongli.zhang@oracle.com/ Fixes: 52f23478081ae0 ("mm/slub.c: fix corrupted freechain in deactivate_slab()") Signed-off-by: Eugeniu Rosca Signed-off-by: Andrew Morton Cc: Dongli Zhang Cc: Joe Jin Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Link: https://lkml.kernel.org/r/20200824130643.10291-1-erosca@de.adit-jv.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/slub.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 454c1d550ad2..51a73d2d1082 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -625,12 +625,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) } static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { if ((s->flags & SLAB_CONSISTENCY_CHECKS) && - !check_valid_pointer(s, page, nextfree)) { - object_err(s, page, freelist, "Freechain corrupt"); - freelist = NULL; + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; slab_fix(s, "Isolate corrupted freechain"); return true; } @@ -1320,7 +1320,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { return false; } @@ -2040,7 +2040,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, * 'freelist' is already corrupted. So isolate all objects * starting at 'freelist'. */ - if (freelist_corrupted(s, page, freelist, nextfree)) + if (freelist_corrupted(s, page, &freelist, nextfree)) break; do { From 33c25edfe15d6f914a26e74dd23468d5417ea3a0 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 28 Apr 2020 17:02:24 -0600 Subject: [PATCH 49/72] vfio/type1: Support faulting PFNMAP vmas commit 41311242221e3482b20bfed10fa4d9db98d87016 upstream. With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on the range being faulted into the vma. Add support to manually provide that, in the same way as done on KVM with hva_to_pfn_remapped(). 
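In outline (a condensed sketch of the diff below; the -EAGAIN/mmap_sem retry in the caller is omitted), the PFN lookup becomes:

	ret = follow_pfn(vma, vaddr, pfn);
	if (ret) {
		/* manually fault the PFNMAP range in, then try again */
		ret = fixup_user_fault(NULL, mm, vaddr,
				       FAULT_FLAG_REMOTE |
				       (write_fault ? FAULT_FLAG_WRITE : 0),
				       &unlocked);
		if (!ret)
			ret = follow_pfn(vma, vaddr, pfn);
	}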
Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.9] Signed-off-by: Ajay Kaher Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/vfio_iommu_type1.c | 36 ++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index a9f58f3867f0..ccef02ceaad9 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -213,6 +213,32 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) { struct page *page[1]; @@ -226,12 +252,16 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) down_read(¤t->mm->mmap_sem); +retry: vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - if (!follow_pfn(vma, vaddr, pfn) && - is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, current->mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(¤t->mm->mmap_sem); From de5dcf9226b35e498c67d43a840e5443d518ea90 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 28 Apr 2020 13:12:20 -0600 Subject: [PATCH 50/72] vfio-pci: Fault mmaps to enable vma tracking commit 11c4cd07ba111a09f49625f9e4c851d83daf0a22 upstream. Rather than calling remap_pfn_range() when a region is mmap'd, setup a vm_ops handler to support dynamic faulting of the range on access. This allows us to manage a list of vmas actively mapping the area that we can later use to invalidate those mappings. The open callback invalidates the vma range so that all tracking is inserted in the fault handler and removed in the close handler. Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.9] Signed-off-by: Ajay Kaher Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/pci/vfio_pci.c | 75 ++++++++++++++++++++++++++++- drivers/vfio/pci/vfio_pci_private.h | 7 +++ 2 files changed, 80 insertions(+), 2 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index c94167d87178..c97cf6cc1cd1 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1144,6 +1144,69 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +static int vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + + mutex_lock(&vdev->vma_lock); + list_add(&mmap_vma->vma_next, &vdev->vma_list); + mutex_unlock(&vdev->vma_lock); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static int vfio_pci_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + + if (vfio_pci_add_vma(vdev, vma)) + return VM_FAULT_OOM; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1209,8 +1272,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. + */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1268,6 +1337,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&vdev->igate); spin_lock_init(&vdev->irqlock); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { vfio_iommu_group_put(group, &pdev->dev); diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index f561ac1c78a0..4b78f12216e4 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -63,6 +63,11 @@ struct vfio_pci_dummy_resource { struct list_head res_next; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -95,6 +100,8 @@ struct vfio_pci_device { struct eventfd_ctx *err_trigger; struct eventfd_ctx *req_trigger; struct list_head dummy_resources_list; + struct mutex vma_lock; + struct list_head vma_list; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) From 5f2c69e2ef24a79b6909a6dc6b249a17909965f8 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Wed, 22 Apr 2020 13:48:11 -0600 Subject: [PATCH 51/72] vfio-pci: Invalidate mmaps and block MMIO access on disabled memory commit abafbc551fddede3e0a08dee1dcde08fc0eb8476 upstream. Accessing the disabled memory space of a PCI device would typically result in a master abort response on conventional PCI, or an unsupported request on PCI express. The user would generally see these as a -1 response for the read return data and the write would be silently discarded, possibly with an uncorrected, non-fatal AER error triggered on the host. 
Some systems however take it upon themselves to bring down the entire system when they see something that might indicate a loss of data, such as this discarded write to a disabled memory space. To avoid this, we want to try to block the user from accessing memory spaces while they're disabled. We start with a semaphore around the memory enable bit, where writers modify the memory enable state and must be serialized, while readers make use of the memory region and can access in parallel. Writers include both direct manipulation via the command register, as well as any reset path where the internal mechanics of the reset may both explicitly and implicitly disable memory access, and manipulation of the MSI-X configuration, where the MSI-X vector table resides in MMIO space of the device. Readers include the read and write file ops to access the vfio device fd offsets as well as memory mapped access. In the latter case, we make use of our new vma list support to zap, or invalidate, those memory mappings in order to force them to be faulted back in on access. Our semaphore usage will stall user access to MMIO spaces across internal operations like reset, but the user might experience new behavior when trying to access the MMIO space while disabled via the PCI command register. Access via read or write while disabled will return -EIO and access via memory maps will result in a SIGBUS. This is expected to be compatible with known use cases and potentially provides better error handling capabilities than present in the hardware, while avoiding the more readily accessible and severe platform error responses that might otherwise occur. Fixes: CVE-2020-12888 Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.9] Signed-off-by: Ajay Kaher Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/pci/vfio_pci.c | 294 ++++++++++++++++++++++++---- drivers/vfio/pci/vfio_pci_config.c | 36 +++- drivers/vfio/pci/vfio_pci_intrs.c | 14 ++ drivers/vfio/pci/vfio_pci_private.h | 9 + drivers/vfio/pci/vfio_pci_rdwr.c | 29 ++- 5 files changed, 334 insertions(+), 48 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index c97cf6cc1cd1..2254c281cc76 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -181,6 +182,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND @@ -656,6 +658,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -729,7 +737,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; - u16 orig_cmd; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -749,10 +757,7 @@ static long vfio_pci_ioctl(void *device_data, * Is it really there? Enable memory decode for * implicit access in pci_map_rom(). 
*/ - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(pdev, PCI_COMMAND, - orig_cmd | PCI_COMMAND_MEMORY); - + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); if (io) { info.flags = VFIO_REGION_INFO_FLAG_READ; @@ -760,8 +765,8 @@ static long vfio_pci_ioctl(void *device_data, } else { info.size = 0; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -909,8 +914,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? - pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -990,8 +1003,9 @@ static long vfio_pci_ioctl(void *device_data, int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -1043,9 +1057,9 @@ static long vfio_pci_ioctl(void *device_data, * user interface and store the group and iommu ID. This * ensures the group is held across the reset. */ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -1058,8 +1072,9 @@ static long vfio_pci_ioctl(void *device_data, break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1078,14 +1093,65 @@ static long vfio_pci_ioctl(void *device_data, ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = slot ? pci_try_reset_slot(vdev->pdev->slot) : - pci_try_reset_bus(vdev->pdev->bus); + + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = slot ? 
pci_try_reset_slot(vdev->pdev->slot) : + pci_try_reset_bus(vdev->pdev->bus); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1144,8 +1210,126 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } -static int vfio_pci_add_vma(struct vfio_pci_device *vdev, - struct vm_area_struct *vma) +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. + * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. 
+ */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) { struct vfio_pci_mmap_vma *mmap_vma; @@ -1154,10 +1338,7 @@ static int vfio_pci_add_vma(struct vfio_pci_device *vdev, return -ENOMEM; mmap_vma->vma = vma; - - mutex_lock(&vdev->vma_lock); list_add(&mmap_vma->vma_next, &vdev->vma_list); - mutex_unlock(&vdev->vma_lock); return 0; } @@ -1190,15 +1371,32 @@ static void vfio_pci_mmap_close(struct vm_area_struct *vma) static int vfio_pci_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct vfio_pci_device *vdev = vma->vm_private_data; + int ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } - if (vfio_pci_add_vma(vdev, vma)) - return VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return VM_FAULT_SIGBUS; + ret = VM_FAULT_SIGBUS; - return VM_FAULT_NOPAGE; +up_out: + up_read(&vdev->memory_lock); + return ret; } static const struct vm_operations_struct vfio_pci_mmap_ops = { @@ -1339,6 +1537,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&vdev->vma_lock); INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { vfio_iommu_group_put(group, &pdev->dev); @@ -1432,12 +1631,6 @@ static struct pci_driver 
vfio_pci_driver = { .err_handler = &vfio_err_handlers, }; -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1459,6 +1652,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * Attempt to do a bus/slot reset if there are devices affected by a reset for * this device that are needs_reset and all of the affected devices are unused diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index ef45b8f5bf51..0cb8b574ed7a 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -400,6 +400,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + return cmd & PCI_COMMAND_MEMORY; +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. 
* (backdoor = some device specific technique that we didn't catch) @@ -560,13 +568,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -583,8 +596,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -595,6 +611,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -832,8 +850,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -911,8 +932,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 94594dc63c41..bdfdd506bc58 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? 
VFIO_PCI_MSIX_IRQ_INDEX : @@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, if (vdev->ctx[vector].trigger) { irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. */ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 4b78f12216e4..f896cebb5c2c 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -102,6 +102,7 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex vma_lock; struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -137,6 +138,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, unsigned int type, unsigned int subtype, const struct vfio_pci_regops *ops, size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 357243d76f10..6445461a5601 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -122,6 +122,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -137,6 +138,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + 
down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -144,20 +153,21 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much faster. */ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else if (!vdev->barmap[bar]) { - int ret; - - ret = pci_request_selected_regions(pdev, 1 << bar, "vfio"); - if (ret) - return ret; + done = pci_request_selected_regions(pdev, 1 << bar, "vfio"); + if (done) + goto out; io = pci_iomap(pdev, bar, 0); if (!io) { pci_release_selected_regions(pdev, 1 << bar); - return -ENOMEM; + done = -ENOMEM; + goto out; } vdev->barmap[bar] = io; @@ -176,6 +186,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } From f8890efdeac2bb064619be05fc3ceb0c6326c5f4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 4 Sep 2020 12:28:57 +0100 Subject: [PATCH 52/72] KVM: arm64: Add kvm_extable for vaxorcism code commit e9ee186bb735bfc17fa81dbc9aebf268aee5b41e upstream. KVM has a one instruction window where it will allow an SError exception to be consumed by the hypervisor without treating it as a hypervisor bug. This is used to consume asynchronous external abort that were caused by the guest. As we are about to add another location that survives unexpected exceptions, generalise this code to make it behave like the host's extable. KVM's version has to be mapped to EL2 to be accessible on nVHE systems. The SError vaxorcism code is a one instruction window, so has two entries in the extable. Because the KVM code is copied for VHE and nVHE, we end up with four entries, half of which correspond with code that isn't mapped. Cc: stable@vger.kernel.org # v4.9 Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_asm.h | 15 ++++++++++ arch/arm64/kernel/vmlinux.lds.S | 8 +++++ arch/arm64/kvm/hyp/entry.S | 16 ++++++---- arch/arm64/kvm/hyp/hyp-entry.S | 51 ++++++++++++++++++++------------ arch/arm64/kvm/hyp/switch.c | 31 +++++++++++++++++++ 5 files changed, 96 insertions(+), 25 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 8f5cf83b2339..f6a22f07fa84 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -106,6 +106,21 @@ extern u32 __init_stage2_translation(void); kern_hyp_va \vcpu .endm +/* + * KVM extable for unexpected exceptions. + * In the same format _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) 
+ .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 6a584558b29d..fa3ffad50a61 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -23,6 +23,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . = ALIGN(SZ_8); \ + VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \ + *(__kvm_ex_table) \ + VMLINUX_SYMBOL(__stop___kvm_ex_table) = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -38,6 +45,7 @@ jiffies = jiffies_64; VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ VMLINUX_SYMBOL(__hyp_text_start) = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ VMLINUX_SYMBOL(__hyp_text_end) = .; #define IDMAP_TEXT \ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index a360ac6e89e9..76545f95f704 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -135,18 +135,22 @@ ENTRY(__guest_exit) // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: + msr daifset, #4 // Mask aborts + ret - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + + // restore the EL1 exception context so that we can report some + // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index bf4988f9dae8..ee2a1b4b4e39 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -25,6 +25,30 @@ #include #include +.macro save_caller_saved_regs_vect + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! +.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .pushsection .hyp.text, "ax" @@ -178,25 +202,14 @@ el1_error: b __guest_exit el2_error: - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? - */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! 
+ + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + eret ENTRY(__hyp_do_panic) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ed7e3a288b4e..0d3d66cc3841 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -25,6 +25,10 @@ #include #include #include +#include + +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; static bool __hyp_text __fpsimd_enabled_nvhe(void) { @@ -454,3 +458,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) unreachable(); } + +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state); + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} From b9ca3f9b8643444ad4a69b9a43f8c7573c3a0136 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 4 Sep 2020 12:28:58 +0100 Subject: [PATCH 53/72] KVM: arm64: Defer guest entry when an asynchronous exception is pending commit 5dcd0fdbb492d49dac6bf21c436dfcb5ded0a895 upstream. SError that occur during world-switch's entry to the guest will be accounted to the guest, as the exception is masked until we enter the guest... but we want to attribute the SError as precisely as possible. Reading DISR_EL1 before guest entry requires free registers, and using ESB+DISR_EL1 to consume and read back the ESR would leave KVM holding a host SError... We would rather leave the SError pending and let the host take it once we exit world-switch. To do this, we need to defer guest-entry if an SError is pending. Read the ISR to see if SError (or an IRQ) is pending. If so fake an exit. Place this check between __guest_enter()'s save of the host registers, and restore of the guest's. SError that occur between here and the eret into the guest must have affected the guest's registers, which we can naturally attribute to the guest. The dsb is needed to ensure any previous writes have been done before we read ISR_EL1. On systems without the v8.2 RAS extensions this doesn't give us anything as we can't contain errors, and the ESR bits to describe the severity are all implementation-defined. Replace this with a nop for these systems. v4.9-backport: as this kernel version doesn't have the RAS support at all, remove the RAS alternative. Cc: stable@vger.kernel.org # v4.9 Signed-off-by: James Morse Signed-off-by: Marc Zyngier [ James: Removed v8.2 RAS related barriers ] Signed-off-by: James Morse Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/entry.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 76545f95f704..4e0eac361f87 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -17,6 +17,7 @@ #include +#include #include #include #include @@ -62,6 +63,15 @@ ENTRY(__guest_enter) // Store the host regs save_callee_saved_regs x1 + // Now the host state is stored if we have a pending RAS SError it must + // affect the host. 
If any asynchronous exception is pending we defer + // the guest entry. + mrs x1, isr_el1 + cbz x1, 1f + mov x0, #ARM_EXCEPTION_IRQ + ret + +1: add x18, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 From ce749a04ba871e5cbc8a6c665e0b250ff6be1563 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 4 Sep 2020 12:28:59 +0100 Subject: [PATCH 54/72] KVM: arm64: Survive synchronous exceptions caused by AT instructions commit 88a84ccccb3966bcc3f309cdb76092a9892c0260 upstream. KVM doesn't expect any synchronous exceptions when executing, any such exception leads to a panic(). AT instructions access the guest page tables, and can cause a synchronous external abort to be taken. The arm-arm is unclear on what should happen if the guest has configured the hardware update of the access-flag, and a memory type in TCR_EL1 that does not support atomic operations. B2.2.6 "Possible implementation restrictions on using atomic instructions" from DDI0487F.a lists synchronous external abort as a possible behaviour of atomic instructions that target memory that isn't writeback cacheable, but the page table walker may behave differently. Make KVM robust to synchronous exceptions caused by AT instructions. Add a get_user() style helper for AT instructions that returns -EFAULT if an exception was generated. While KVM's version of the exception table mixes synchronous and asynchronous exceptions, only one of these can occur at each location. Re-enter the guest when the AT instructions take an exception on the assumption the guest will take the same exception. This isn't guaranteed to make forward progress, as the AT instructions may always walk the page tables, but guest execution may use the translation cached in the TLB. This isn't a problem, as since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest entry when an asynchronous exception is pending"), KVM will return to the host to process IRQs allowing the rest of the system to keep running. 
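For illustration, the caller-side pattern ends up treating the AT much like a get_user() that may fail (simplified sketch of the switch.c hunk below, not the literal change):

	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);	/* walk completed, result is in PAR_EL1 */
	else
		tmp = 1;			/* the AT itself faulted: treat as PAR_EL1.F set */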
Cc: stable@vger.kernel.org # v4.9 Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_asm.h | 28 ++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp-entry.S | 12 ++++++++++-- arch/arm64/kvm/hyp/switch.c | 8 ++++---- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f6a22f07fa84..3d2fddac25b9 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -82,6 +82,34 @@ extern u32 __init_stage2_translation(void); *__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index ee2a1b4b4e39..7ced1fb93d07 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -201,6 +201,15 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + + eret + el2_error: save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! @@ -238,7 +247,6 @@ ENDPROC(\label) invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid - invalid_vector el2h_sync_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid invalid_vector el1_sync_invalid @@ -255,7 +263,7 @@ ENTRY(__kvm_hyp_vector) ventry el2t_fiq_invalid // FIQ EL2t ventry el2t_error_invalid // Error EL2t - ventry el2h_sync_invalid // Synchronous EL2h + ventry el2_sync // Synchronous EL2h ventry el2h_irq_invalid // IRQ EL2h ventry el2h_fiq_invalid // FIQ EL2h ventry el2_error // Error EL2h diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0d3d66cc3841..0a2f37bceab0 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -206,10 +206,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = 1; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & 1)) From 0696d08a50ae19e272e61b30e5968c13256f073e Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 4 Sep 2020 12:29:00 +0100 Subject: [PATCH 55/72] KVM: arm64: Set HCR_EL2.PTW to prevent AT taking synchronous exception commit 71a7f8cb1ca4ca7214a700b1243626759b6c11d4 upstream. AT instructions do a translation table walk and return the result, or the fault in PAR_EL1. KVM uses these to find the IPA when the value is not provided by the CPU in HPFAR_EL1. 
If a translation table walk causes an external abort it is taken as an exception, even if it was due to an AT instruction. (DDI0487F.a's D5.2.11 "Synchronous faults generated by address translation instructions") While we previously made KVM resilient to exceptions taken due to AT instructions, the device access causes mismatched attributes, and may occur speculatively. Prevent this, by forbidding a walk through memory described as device at stage2. Now such AT instructions will report a stage2 fault. Such a fault will cause KVM to restart the guest. If the AT instructions always walk the page tables, but guest execution uses the translation cached in the TLB, the guest can't make forward progress until the TLB entry is evicted. This isn't a problem, as since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest entry when an asynchronous exception is pending"), KVM will return to the host to process IRQs allowing the rest of the system to keep running. Cc: stable@vger.kernel.org # v4.9 Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_arm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a11c8c2915c9..e8cb69b0cf4f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -78,10 +78,11 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ - HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) + HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) From f4461490c38105bc040fb9ef5966c949da483d0e Mon Sep 17 00:00:00 2001 From: Tim Froidcoeur Date: Tue, 11 Aug 2020 20:33:23 +0200 Subject: [PATCH 56/72] net: refactor bind_bucket fastreuse into helper commit 62ffc589abb176821662efc4525ee4ac0b9c3894 upstream. Refactor the fastreuse update code in inet_csk_get_port into a small helper function that can be called from other places. Acked-by: Matthieu Baerts Signed-off-by: Tim Froidcoeur Signed-off-by: David S. 
Miller Signed-off-by: Tim Froidcoeur Signed-off-by: Greg Kroah-Hartman --- include/net/inet_connection_sock.h | 4 ++++ net/ipv4/inet_connection_sock.c | 37 ++++++++++++++++++++---------- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 146054ceea8e..5bb56ebf3c9f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -319,5 +319,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 1bcbb7399fe6..5a0352ccadd3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -89,6 +89,28 @@ int inet_csk_bind_conflict(const struct sock *sk, } EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) +{ + kuid_t uid = sock_i_uid(sk); + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + + if (!hlist_empty(&tb->owners)) { + if (!reuse) + tb->fastreuse = 0; + if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) + tb->fastreuseport = 0; + } else { + tb->fastreuse = reuse; + if (sk->sk_reuseport) { + tb->fastreuseport = 1; + tb->fastuid = uid; + } else { + tb->fastreuseport = 0; + } + } +} + /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. * We try to allocate an odd port (and leave even ports for connect()) @@ -218,19 +240,10 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) } goto fail_unlock; } - if (!reuse) - tb->fastreuse = 0; - if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) - tb->fastreuseport = 0; - } else { - tb->fastreuse = reuse; - if (sk->sk_reuseport) { - tb->fastreuseport = 1; - tb->fastuid = uid; - } else { - tb->fastreuseport = 0; - } } + + inet_csk_update_fastreuse(tb, sk); + success: if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); From d370e957788316dbf08ed22446f3eed6821918cd Mon Sep 17 00:00:00 2001 From: Tim Froidcoeur Date: Tue, 11 Aug 2020 20:33:24 +0200 Subject: [PATCH 57/72] net: initialize fastreuse on inet_inherit_port commit d76f3351cea2d927fdf70dd7c06898235035e84e upstream. In the case of TPROXY, bind_conflict optimizations for SO_REUSEADDR or SO_REUSEPORT are broken, possibly resulting in O(n) instead of O(1) bind behaviour or in the incorrect reuse of a bind. the kernel keeps track for each bind_bucket if all sockets in the bind_bucket support SO_REUSEADDR or SO_REUSEPORT in two fastreuse flags. These flags allow skipping the costly bind_conflict check when possible (meaning when all sockets have the proper SO_REUSE option). For every socket added to a bind_bucket, these flags need to be updated. As soon as a socket that does not support reuse is added, the flag is set to false and will never go back to true, unless the bind_bucket is deleted. Note that there is no mechanism to re-evaluate these flags when a socket is removed (this might make sense when removing a socket that would not allow reuse; this leaves room for a future patch). 
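The flag life cycle described above can be sketched as follows (illustration only; SO_REUSEPORT follows the same shape, and the authoritative logic is inet_csk_update_fastreuse() from the previous patch):

	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;	/* first owner seeds the flag */
	} else if (!reuse) {
		tb->fastreuse = 0;	/* one non-reuse owner poisons the bucket;
					 * nothing sets it back to 1 until the
					 * bucket itself is destroyed */
	}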
For this optimization to work, it is mandatory that these flags are properly initialized and updated. When a child socket is created from a listen socket in __inet_inherit_port, the TPROXY case could create a new bind bucket without properly initializing these flags, thus preventing the optimization to work. Alternatively, a socket not allowing reuse could be added to an existing bind bucket without updating the flags, causing bind_conflict to never be called as it should. Call inet_csk_update_fastreuse when __inet_inherit_port decides to create a new bind_bucket or use a different bind_bucket than the one of the listen socket. Fixes: 093d282321da ("tproxy: fix hash locking issue when using port redirection in __inet_inherit_port()") Acked-by: Matthieu Baerts Signed-off-by: Tim Froidcoeur Signed-off-by: David S. Miller Signed-off-by: Tim Froidcoeur Signed-off-by: Greg Kroah-Hartman --- net/ipv4/inet_hashtables.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 4bf542f4d980..887633870763 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) return -ENOMEM; } } + inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); From 54764e8dfbd4fd2bf615aa5a2445ce7faa216cbc Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 25 Jun 2020 11:04:23 -0600 Subject: [PATCH 58/72] vfio/pci: Fix SR-IOV VF handling with MMIO blocking commit ebfa440ce38b7e2e04c3124aa89c8a9f4094cf21 upstream. SR-IOV VFs do not implement the memory enable bit of the command register, therefore this bit is not set in config space after pci_enable_device(). This leads to an unintended difference between PF and VF in hand-off state to the user. We can correct this by setting the initial value of the memory enable bit in our virtualized config space. There's really no need however to ever fault a user on a VF though as this would only indicate an error in the user's management of the enable bit, versus a PF where the same access could trigger hardware faults. Fixes: abafbc551fdd ("vfio-pci: Invalidate mmaps and block MMIO access on disabled memory") Signed-off-by: Alex Williamson Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/pci/vfio_pci_config.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 0cb8b574ed7a..f3c2de04b20d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -403,9 +403,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) /* Caller should hold memory_lock semaphore */ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) { + struct pci_dev *pdev = vdev->pdev; u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); - return cmd & PCI_COMMAND_MEMORY; + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. 
+ */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); } /* @@ -1729,6 +1735,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. + */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) From 913412eda13d2797bb49461854829132afab0682 Mon Sep 17 00:00:00 2001 From: Mrinal Pandey Date: Fri, 4 Sep 2020 16:35:52 -0700 Subject: [PATCH 59/72] checkpatch: fix the usage of capture group ( ... ) commit 13e45417cedbfc44b1926124b1846f5ee8c6ba4a upstream. The usage of "capture group (...)" in the immediate condition after `&&` results in `$1` being uninitialized. This issues a warning "Use of uninitialized value $1 in regexp compilation at ./scripts/checkpatch.pl line 2638". I noticed this bug while running checkpatch on the set of commits from v5.7 to v5.8-rc1 of the kernel on the commits with a diff content in their commit message. This bug was introduced in the script by commit e518e9a59ec3 ("checkpatch: emit an error when there's a diff in a changelog"). It has been in the script since then. The author intended to store the match made by capture group in variable `$1`. This should have contained the name of the file as `[\w/]+` matched. However, this couldn't be accomplished due to usage of capture group and `$1` in the same regular expression. Fix this by placing the capture group in the condition before `&&`. Thus, `$1` can be initialized to the text that capture group matches thereby setting it to the desired and required value. Fixes: e518e9a59ec3 ("checkpatch: emit an error when there's a diff in a changelog") Signed-off-by: Mrinal Pandey Signed-off-by: Andrew Morton Tested-by: Lukas Bulwahn Reviewed-by: Lukas Bulwahn Cc: Joe Perches Link: https://lkml.kernel.org/r/20200714032352.f476hanaj2dlmiot@mrinalpandey Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- scripts/checkpatch.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 55171647f516..9432387dc178 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2375,8 +2375,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", From 47b1be395352d54f888f4331fa291dc9199fff4c Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Fri, 4 Sep 2020 16:36:13 -0700 Subject: [PATCH 60/72] mm/hugetlb: fix a race between hugetlb sysctl handlers commit 17743798d81238ab13050e8e2833699b54e15467 upstream. There is a race between the assignment of `table->data` and write value to the pointer of `table->data` in the __do_proc_doulongvec_minmax() on the other thread. 
CPU0: CPU1: proc_sys_write hugetlb_sysctl_handler proc_sys_call_handler hugetlb_sysctl_handler_common hugetlb_sysctl_handler table->data = &tmp; hugetlb_sysctl_handler_common table->data = &tmp; proc_doulongvec_minmax do_proc_doulongvec_minmax sysctl_head_finish __do_proc_doulongvec_minmax unuse_table i = table->data; *i = val; // corrupt CPU1's stack Fix this by duplicating the `table`, and only update the duplicate of it. And introduce a helper of proc_hugetlb_doulongvec_minmax() to simplify the code. The following oops was seen: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor instruction fetch in kernel mode #PF: error_code(0x0010) - not-present page Code: Bad RIP value. ... Call Trace: ? set_max_huge_pages+0x3da/0x4f0 ? alloc_pool_huge_page+0x150/0x150 ? proc_doulongvec_minmax+0x46/0x60 ? hugetlb_sysctl_handler_common+0x1c7/0x200 ? nr_hugepages_store+0x20/0x20 ? copy_fd_bitmaps+0x170/0x170 ? hugetlb_sysctl_handler+0x1e/0x20 ? proc_sys_call_handler+0x2f1/0x300 ? unregister_sysctl_table+0xb0/0xb0 ? __fd_install+0x78/0x100 ? proc_sys_write+0x14/0x20 ? __vfs_write+0x4d/0x90 ? vfs_write+0xef/0x240 ? ksys_write+0xc0/0x160 ? __ia32_sys_read+0x50/0x50 ? __close_fd+0x129/0x150 ? __x64_sys_write+0x43/0x50 ? do_syscall_64+0x6c/0x200 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: e5ff215941d5 ("hugetlb: multiple hstates for multiple page sizes") Signed-off-by: Muchun Song Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Cc: Andi Kleen Link: http://lkml.kernel.org/r/20200828031146.43035-1-songmuchun@bytedance.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/hugetlb.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2c22ea7a2013..b469d099dc5f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2921,6 +2921,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. + */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -2932,9 +2948,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -2978,9 +2993,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; From 2e34f0fcc673374f7918d462ab905ffe7f2c0cc4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 19 Aug 2020 10:46:48 +0200 Subject: [PATCH 61/72] cfg80211: regulatory: reject invalid hints commit 47caf685a6854593348f216e0b489b71c10cbe03 upstream. 
From 2e34f0fcc673374f7918d462ab905ffe7f2c0cc4 Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Wed, 19 Aug 2020 10:46:48 +0200
Subject: [PATCH 61/72] cfg80211: regulatory: reject invalid hints

commit 47caf685a6854593348f216e0b489b71c10cbe03 upstream.

Reject invalid hints early in order to not cause a kernel WARN later,
e.g. when they're restored.

Reported-by: syzbot+d451401ffd00a60677ee@syzkaller.appspotmail.com
Link: https://syzkaller.appspot.com/bug?extid=d451401ffd00a60677ee
Link: https://lore.kernel.org/r/20200819084648.13956-1-johannes@sipsolutions.net
Signed-off-by: Johannes Berg
Signed-off-by: Greg Kroah-Hartman
---
 net/wireless/reg.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 6d5f3f737207..a649763b854d 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2321,6 +2321,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM;

From 968ec616e7074b379954e7d93fc22a6ecb631e37 Mon Sep 17 00:00:00 2001
From: Himadri Pandya
Date: Thu, 27 Aug 2020 12:23:55 +0530
Subject: [PATCH 62/72] net: usb: Fix uninit-was-stored issue in asix_read_phy_addr()

commit a092b7233f0e000cc6f2c71a49e2ecc6f917a5fc upstream.

The buffer size is 2 bytes and we expect to receive the same amount of
data. But sometimes we receive less data and run into an
uninit-was-stored issue upon read. Hence modify the error check on the
return value to match the buffer size as a prevention.

Reported-and-tested-by: syzbot+a7e220df5a81d1ab400e@syzkaller.appspotmail.com
Signed-off-by: Himadri Pandya
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
---
 drivers/net/usb/asix_common.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 3dbb0646b024..541c06c884e5 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -277,7 +277,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; }

From ad2eb69aefa38bf31d0136051407dbafa3b52228 Mon Sep 17 00:00:00 2001
From: Takashi Sakamoto
Date: Sun, 23 Aug 2020 16:55:37 +0900
Subject: [PATCH 63/72] ALSA: firewire-tascam: exclude Tascam FE-8 from detection

Tascam FE-8 is known to support communication by asynchronous
transaction only. The support can be implemented in a userspace
application, and the snd-firewire-ctl-services project already has it.
However, the ALSA firewire-tascam driver is bound to the model. This
commit changes the device entries so that the model is excluded.

In commit 53b3ffee7885 ("ALSA: firewire-tascam: change device probing
processing"), I addressed the concern that the version field in the
configuration differs depending on the installed firmware. However, as
far as I checked, the version number is fixed. It's safe to return the
version number back to the modalias.
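As an aside (not part of the patch): the device entries below key every
match on the unit's version value, so a tiny sketch of how such a table
behaves may help. The struct and lookup() used here are simplified,
hypothetical stand-ins for the real ieee1394_device_id matching; the
version numbers are the ones quoted in the patch, and FE-8's 0x800001 is
deliberately left out, which is all it takes for the driver not to bind.

#include <stdio.h>

struct id_entry {
	unsigned int version;
	const char *model;
};

/* entries mirroring the patch below: FE-8 (0x800001) is absent on purpose */
static const struct id_entry snd_tscm_ids[] = {
	{ 0x800000, "FW-1884" },
	{ 0x800003, "FW-1082" },
	{ 0x800004, "FW-1804" },
};

static const struct id_entry *lookup(unsigned int version)
{
	for (size_t i = 0; i < sizeof(snd_tscm_ids) / sizeof(snd_tscm_ids[0]); i++)
		if (snd_tscm_ids[i].version == version)
			return &snd_tscm_ids[i];
	return NULL;	/* no match: the driver is simply never bound */
}

int main(void)
{
	const struct id_entry *e = lookup(0x800001);	/* FE-8 */

	printf("FE-8:    %s\n", e ? e->model : "not bound, left to userspace");
	e = lookup(0x800000);
	printf("FW-1884: %s\n", e ? e->model : "not bound");
	return 0;
}

Running it shows the FE-8 lookup falling through to "not bound" while
FW-1884 still matches, which mirrors how the narrowed modalias keeps the
kernel driver away from the FE-8.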
Fixes: 53b3ffee7885 ("ALSA: firewire-tascam: change device probing processing") Cc: # 4.4+ Signed-off-by: Takashi Sakamoto Link: https://lore.kernel.org/r/20200823075537.56255-1-o-takashi@sakamocchi.jp Signed-off-by: Takashi Iwai --- sound/firewire/tascam/tascam.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 4c967ac1c0e8..40ed4c92e48b 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. { .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support FE-8 because the most of features + // can be implemented in userspace without any specific support of this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800004, }, /* FE-08 requires reverse-engineering because it just has faders. */ {} From f0c44a60acdd95852d982491cf03938853d7bebd Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Mon, 27 Feb 2017 14:27:52 -0800 Subject: [PATCH 64/72] fs/affs: use octal for permissions [ Upstream commit 1bafd6f164d9ec9bf2fa9829051fbeb36342be0b ] According to commit f90774e1fd27 ("checkpatch: look for symbolic permissions and suggest octal instead") Link: http://lkml.kernel.org/r/20170109191208.6085-5-fabf@skynet.be Signed-off-by: Fabian Frederick Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/affs/amigaffs.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 0ec65c133b93..fd7a7542956d 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -391,23 +391,23 @@ prot_to_mode(u32 prot) umode_t mode = 0; if (!(prot & FIBF_NOWRITE)) - mode |= S_IWUSR; + mode |= 0200; if (!(prot & FIBF_NOREAD)) - mode |= S_IRUSR; + mode |= 0400; if (!(prot & FIBF_NOEXECUTE)) - mode |= S_IXUSR; + mode |= 0100; if (prot & FIBF_GRP_WRITE) - mode |= S_IWGRP; + mode |= 0020; if (prot & FIBF_GRP_READ) - mode |= S_IRGRP; + mode |= 0040; if (prot & FIBF_GRP_EXECUTE) - mode |= S_IXGRP; + mode |= 0010; if (prot & FIBF_OTR_WRITE) - mode |= S_IWOTH; + mode |= 0002; if (prot & FIBF_OTR_READ) - mode |= S_IROTH; + mode |= 0004; if (prot & FIBF_OTR_EXECUTE) - mode |= S_IXOTH; + mode |= 0001; return mode; } @@ -418,23 +418,23 @@ mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; - if (!(mode & S_IXUSR)) + if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; - if (!(mode & S_IRUSR)) + if (!(mode & 0400)) prot |= FIBF_NOREAD; - if (!(mode & S_IWUSR)) + if (!(mode & 0200)) prot |= FIBF_NOWRITE; - if (mode & S_IXGRP) + if (mode & 0010) prot |= FIBF_GRP_EXECUTE; - if (mode & S_IRGRP) + if (mode & 0040) prot |= 
FIBF_GRP_READ; - if (mode & S_IWGRP) + if (mode & 0020) prot |= FIBF_GRP_WRITE; - if (mode & S_IXOTH) + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; - if (mode & S_IROTH) + if (mode & 0004) prot |= FIBF_OTR_READ; - if (mode & S_IWOTH) + if (mode & 0002) prot |= FIBF_OTR_WRITE; AFFS_I(inode)->i_protect = prot; From e942ed86e5252fe206ef0c83f14bce331aa43d70 Mon Sep 17 00:00:00 2001 From: Max Staudt Date: Thu, 27 Aug 2020 17:49:00 +0200 Subject: [PATCH 65/72] affs: fix basic permission bits to actually work [ Upstream commit d3a84a8d0dde4e26bc084b36ffcbdc5932ac85e2 ] The basic permission bits (protection bits in AmigaOS) have been broken in Linux' AFFS - it would only set bits, but never delete them. Also, contrary to the documentation, the Archived bit was not handled. Let's fix this for good, and set the bits such that Linux and classic AmigaOS can coexist in the most peaceful manner. Also, update the documentation to represent the current state of things. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable@vger.kernel.org Signed-off-by: Max Staudt Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- Documentation/filesystems/affs.txt | 16 ++++++++++------ fs/affs/amigaffs.c | 27 +++++++++++++++++++++++++++ fs/affs/file.c | 26 +++++++++++++++++++++++++- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt index 71b63c2b9841..a8f1a58e3692 100644 --- a/Documentation/filesystems/affs.txt +++ b/Documentation/filesystems/affs.txt @@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: - R maps to r for user, group and others. On directories, R implies x. - - If both W and D are allowed, w will be set. + - W maps to w. - E maps to x. - - H and P are always retained and ignored under Linux. + - D is ignored. - - A is always reset when a file is written to. + - H, S and P are always retained and ignored under Linux. + + - A is cleared when a file is written to. User id and group id will be used unless set[gu]id are given as mount options. Since most of the Amiga file systems are single user systems @@ -111,11 +113,13 @@ Linux -> Amiga: The Linux rwxrwxrwx file mode is handled as follows: - - r permission will set R for user, group and others. + - r permission will allow R for user, group and others. + + - w permission will allow W for user, group and others. - - w permission will set W and D for user, group and others. + - x permission of the user will allow E for plain files. - - x permission of the user will set E for plain files. + - D will be allowed for user, group and others. - All other flags (suid, sgid, ...) are ignored and will not be retained. diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index fd7a7542956d..e57f12317ab6 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -418,24 +418,51 @@ mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. + * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. 
+ */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. */ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index 0deec9cc2362..0daca9d00cd8 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -427,6 +427,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -436,7 +454,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -793,6 +811,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); From ce391ac20e6308fab5819560aba6e01cccdd354e Mon Sep 17 00:00:00 2001 From: Shung-Hsi Yu Date: Mon, 31 Aug 2020 22:37:09 +0800 Subject: [PATCH 66/72] net: ethernet: mlx4: Fix memory allocation in mlx4_buddy_init() commit cbedcb044e9cc4e14bbe6658111224bb923094f4 upstream. On machines with much memory (> 2 TByte) and log_mtts_per_seg == 0, a max_order of 31 will be passed to mlx_buddy_init(), which results in s = BITS_TO_LONGS(1 << 31) becoming a negative value, leading to kvmalloc_array() failure when it is converted to size_t. mlx4_core 0000:b1:00.0: Failed to initialize memory region table, aborting mlx4_core: probe of 0000:b1:00.0 failed with error -12 Fix this issue by changing the left shifting operand from a signed literal to an unsigned one. Fixes: 225c7b1feef1 ("IB/mlx4: Add a driver Mellanox ConnectX InfiniBand adapters") Signed-off-by: Shung-Hsi Yu Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx4/mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 3637474cab8a..50683693d9fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); if (!buddy->bits[i]) { buddy->bits[i] = vzalloc(s * sizeof(long)); From 23603e69bc6c95c846f3d1d93affaa24c18cf9f0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Aug 2020 12:40:07 -0700 Subject: [PATCH 67/72] bnxt: don't enable NAPI until rings are ready commit 96ecdcc992eb7f468b2cf829b0f5408a1fad4668 upstream. Netpoll can try to poll napi as soon as napi_enable() is called. It crashes trying to access a doorbell which is still NULL: BUG: kernel NULL pointer dereference, address: 0000000000000000 CPU: 59 PID: 6039 Comm: ethtool Kdump: loaded Tainted: G S 5.9.0-rc1-00469-g5fd99b5d9950-dirty #26 RIP: 0010:bnxt_poll+0x121/0x1c0 Code: c4 20 44 89 e0 5b 5d 41 5c 41 5d 41 5e 41 5f c3 41 8b 86 a0 01 00 00 41 23 85 18 01 00 00 49 8b 96 a8 01 00 00 0d 00 00 00 24 <89> 02 41 f6 45 77 02 74 cb 49 8b ae d8 01 00 00 31 c0 c7 44 24 1a netpoll_poll_dev+0xbd/0x1a0 __netpoll_send_skb+0x1b2/0x210 netpoll_send_udp+0x2c9/0x406 write_ext_msg+0x1d7/0x1f0 console_unlock+0x23c/0x520 vprintk_emit+0xe0/0x1d0 printk+0x58/0x6f x86_vector_activate.cold+0xf/0x46 __irq_domain_activate_irq+0x50/0x80 __irq_domain_activate_irq+0x32/0x80 __irq_domain_activate_irq+0x32/0x80 irq_domain_activate_irq+0x25/0x40 __setup_irq+0x2d2/0x700 request_threaded_irq+0xfb/0x160 __bnxt_open_nic+0x3b1/0x750 bnxt_open_nic+0x19/0x30 ethtool_set_channels+0x1ac/0x220 dev_ethtool+0x11ba/0x2240 dev_ioctl+0x1cf/0x390 sock_do_ioctl+0x95/0x130 Reported-by: Rob Sherwood Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") Signed-off-by: Jakub Kicinski Reviewed-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f451be63ab7e..dc34cfa2a58f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5589,14 +5589,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_enable_napi(bp); - rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); - goto open_err; + goto open_err_irq; } + bnxt_enable_napi(bp); + if (link_re_init) { mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); @@ -5618,9 +5618,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) return 0; -open_err: - bnxt_disable_napi(bp); - open_err_irq: bnxt_del_napi(bp); From a542f4fe6e353af48bb28f4f4d24b003ce1661f6 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 21 Aug 2020 16:34:52 -0400 Subject: [PATCH 68/72] netlabel: fix problems with mapping removal [ Upstream commit d3b990b7f327e2afa98006e7666fb8ada8ed8683 ] This patch fixes two main problems seen when removing NetLabel mappings: memory leaks and potentially extra audit noise. The memory leaks are caused by not properly free'ing the mapping's address selector struct when free'ing the entire entry as well as not properly cleaning up a temporary mapping entry when adding new address selectors to an existing entry. This patch fixes both these problems such that kmemleak reports no NetLabel associated leaks after running the SELinux test suite. The potentially extra audit noise was caused by the auditing code in netlbl_domhsh_remove_entry() being called regardless of the entry's validity. If another thread had already marked the entry as invalid, but not removed/free'd it from the list of mappings, then it was possible that an additional mapping removal audit record would be generated. This patch fixes this by returning early from the removal function when the entry was previously marked invalid. This change also had the side benefit of improving the code by decreasing the indentation level of large chunk of code by one (accounting for most of the diffstat). Fixes: 63c416887437 ("netlabel: Add network address selectors to the NetLabel/LSM domain mapping") Reported-by: Stephen Smalley Signed-off-by: Paul Moore Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/netlabel/netlabel_domainhash.c | 59 +++++++++++++++--------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 41d0e95d171e..b1a1718495f3 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } From 3412e0266322b706f21caceecbea4981b9356852 Mon Sep 17 00:00:00 2001 From: Kamil Lorenc Date: Tue, 1 Sep 2020 10:57:38 +0200 Subject: [PATCH 69/72] net: usb: dm9601: Add USB ID of Keenetic Plus DSL [ Upstream commit a609d0259183a841621f252e067f40f8cc25d6f6 ] Keenetic Plus DSL is a xDSL modem that uses dm9620 as its USB interface. Signed-off-by: Kamil Lorenc Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/dm9601.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 0b4bdd39106b..fb18801d0fe7 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -624,6 +624,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; From 20e63db77149deb52d76ff9c8e7ddd3349c390ea Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 21 Aug 2020 14:59:38 +0800 Subject: [PATCH 70/72] sctp: not disable bh in the whole sctp_get_port_local() [ Upstream commit 3106ecb43a05dc3e009779764b9da245a5d082de ] With disabling bh in the whole sctp_get_port_local(), when snum == 0 and too many ports have been used, the do-while loop will take the cpu for a long time and cause cpu stuck: [ ] watchdog: BUG: soft lockup - CPU#11 stuck for 22s! [ ] RIP: 0010:native_queued_spin_lock_slowpath+0x4de/0x940 [ ] Call Trace: [ ] _raw_spin_lock+0xc1/0xd0 [ ] sctp_get_port_local+0x527/0x650 [sctp] [ ] sctp_do_bind+0x208/0x5e0 [sctp] [ ] sctp_autobind+0x165/0x1e0 [sctp] [ ] sctp_connect_new_asoc+0x355/0x480 [sctp] [ ] __sctp_connect+0x360/0xb10 [sctp] There's no need to disable bh in the whole function of sctp_get_port_local. So fix this cpu stuck by removing local_bh_disable() called at the beginning, and using spin_lock_bh() instead. The same thing was actually done for inet_csk_get_port() in Commit ea8add2b1903 ("tcp/dccp: better use of ephemeral ports in bind()"). Thanks to Marcelo for pointing the buggy code out. v1->v2: - use cond_resched() to yield cpu to other tasks if needed, as Eric noticed. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Ying Xu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 95f39dde1e08..c0fe647dd4ac 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -6687,8 +6687,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; @@ -6707,20 +6705,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -6735,7 +6734,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. 
*/ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; @@ -6819,10 +6818,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } From 6048d57b66d724a3aa0faf52bf3eb2126396450f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Aug 2020 12:40:06 -0700 Subject: [PATCH 71/72] net: disable netpoll on fresh napis [ Upstream commit 96e97bc07e90f175a8980a22827faf702ca4cb30 ] napi_disable() makes sure to set the NAPI_STATE_NPSVC bit to prevent netpoll from accessing rings before init is complete. However, the same is not done for fresh napi instances in netif_napi_add(), even though we expect NAPI instances to be added as disabled. This causes crashes during driver reconfiguration (enabling XDP, changing the channel count) - if there is any printk() after netif_napi_add() but before napi_enable(). To ensure memory ordering is correct we need to use RCU accessors. Reported-by: Rob Sherwood Fixes: 2d8bff12699a ("netpoll: Close race condition between poll_one_napi and napi_disable") Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dev.c | 3 ++- net/core/netpoll.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index dd8d36feb69f..9ac591dd16d5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5188,13 +5188,14 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, pr_err_once("netif_napi_add() called with weight %d on device %s\n", weight, dev->name); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL spin_lock_init(&napi->poll_lock); napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 5de180a9b7f5..9c1bad3909bd 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -178,7 +178,7 @@ static void poll_napi(struct net_device *dev) { struct napi_struct *napi; - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (napi->poll_owner != smp_processor_id() && spin_trylock(&napi->poll_lock)) { poll_one_napi(napi); From 65676505f8fbee350e75a9d4b933d2857cb17c4d Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 12 Sep 2020 11:47:40 +0200 Subject: [PATCH 72/72] Linux 4.9.236 Tested-by: Jon Hunter Tested-by: Shuah Khan Tested-by: Guenter Roeck Tested-by: Linux Kernel Functional Testing Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d21084a36bd4..a454c9cd126e 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 235 +SUBLEVEL = 236 EXTRAVERSION = NAME = Roaring Lionus
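A closing aside, not part of the series: the mlx4_buddy_init() fix in
patch 66 above hinges on integer promotion - "1 << 31" is evaluated as a
signed int, so the result is negative and becomes enormous once it is
converted to size_t. The userspace sketch below reproduces that
arithmetic. BITS_TO_LONGS is re-defined locally in the spirit of the
kernel macro, and the shift is written as (int)(1u << 31) so the demo
avoids signed-overflow undefined behaviour (the conversion back to int
is merely implementation-defined, which is good enough here).

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG	(8 * (int)sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int max_order = 31;

	/* what the old expression produced: a negative int on common ABIs */
	long broken = (int)(1u << max_order);
	/* fed into a size calculation, it turns into an absurd size_t */
	size_t huge = (size_t)BITS_TO_LONGS(broken);

	/* the fix: do the shift as unsigned long from the start */
	unsigned long fixed = 1UL << max_order;
	size_t sane = BITS_TO_LONGS(fixed);

	printf("1 << 31 promoted to int  : %ld\n", broken);
	printf("longs needed (broken)    : %zu\n", huge);
	printf("1UL << 31                : %lu\n", fixed);
	printf("longs needed (fixed)     : %zu\n", sane);
	return 0;
}

On a 64-bit build the broken form asks for on the order of 10^19 longs,
which is why the allocation fails, while the 1UL form yields the
expected 2^25.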